// SPDX-License-Identifier: GPL-2.0
/*
 * NUMA emulation
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/memblock.h>
#include <asm/dma.h>

#include "numa_internal.h"

static int emu_nid_to_phys[MAX_NUMNODES];
static char *emu_cmdline __initdata;

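/*
 * Remember the numa=fake= option string; it is typically handed to us by the
 * numa= early parameter handler and only parsed later by numa_emulation().
 */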
int __init numa_emu_cmdline(char *str)
{
	emu_cmdline = str;
	return 0;
}

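/*
 * Find the index of the first memblk in @mi that still belongs to physical
 * node @nid, or -ENOENT if that node has no memory left to carve from.
 */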
static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].nid == nid)
			return i;
	return -ENOENT;
}

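/*
 * Size of the memory hole(s) in [@start, @end): the number of bytes in the
 * range that are not present in the physical memory map, counted in whole
 * pages via absent_pages_in_range().
 */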
static u64 __init mem_hole_size(u64 start, u64 end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = PFN_DOWN(end);

	if (start_pfn < end_pfn)
		return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
	return 0;
}

/*
 * Set up emulated node @nid to cover @size bytes carved from the start of
 * physical memblk @phys_blk. Returns 0 on success or -errno on failure.
 */
static int __init emu_setup_memblk(struct numa_meminfo *ei,
				   struct numa_meminfo *pi,
				   int nid, int phys_blk, u64 size)
{
	struct numa_memblk *eb = &ei->blk[ei->nr_blks];
	struct numa_memblk *pb = &pi->blk[phys_blk];

	if (ei->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("NUMA: Too many emulated memblks, failing emulation\n");
		return -EINVAL;
	}

	ei->nr_blks++;
	eb->start = pb->start;
	eb->end = pb->start + size;
	eb->nid = nid;

	if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
		emu_nid_to_phys[nid] = pb->nid;

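	/*
	 * Consume @size bytes from the front of the physical block; once the
	 * block is fully used up, drop it from the physical meminfo so it is
	 * not handed out again.
	 */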
	pb->start += size;
	if (pb->start >= pb->end) {
		WARN_ON_ONCE(pb->start > pb->end);
		numa_remove_memblk_from(phys_blk, pi);
	}

	printk(KERN_INFO "Faking node %d at [mem %#018Lx-%#018Lx] (%LuMB)\n",
	       nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20);
	return 0;
}

/*
 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
 * to max_addr.
 *
 * Returns zero on success or negative on error.
 */
static int __init split_nodes_interleave(struct numa_meminfo *ei,
					 struct numa_meminfo *pi,
					 u64 addr, u64 max_addr, int nr_nodes)
{
	nodemask_t physnode_mask = numa_nodes_parsed;
	u64 size;
	int big;
	int nid = 0;
	int i, ret;

	if (nr_nodes <= 0)
		return -1;
	if (nr_nodes > MAX_NUMNODES) {
		pr_info("numa=fake=%d too large, reducing to %d\n",
			nr_nodes, MAX_NUMNODES);
		nr_nodes = MAX_NUMNODES;
	}

	/*
	 * Calculate the target node size. x86_32 has no __udivdi3(), so do
	 * the division in an unsigned long number of pages and convert back.
	 */
	size = max_addr - addr - mem_hole_size(addr, max_addr);
	size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes);

	/*
	 * Calculate the number of big nodes that can be allocated as a result
	 * of consolidating the remainder.
	 */
	big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
		FAKE_NODE_MIN_SIZE;
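	/*
	 * Worked example (assuming the usual 64MB FAKE_NODE_MIN_SIZE): with
	 * nr_nodes = 10 and a raw per-node target of 200MB, the 8MB remainder
	 * per node adds up to 80MB, so big = 80MB / 64MB = 1, size is trimmed
	 * to 192MB below, and exactly one node is later grown by
	 * FAKE_NODE_MIN_SIZE to soak up the leftover.
	 */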

	size &= FAKE_NODE_MIN_HASH_MASK;
	if (!size) {
		pr_err("Not enough memory for each node. NUMA emulation disabled.\n");
		return -1;
	}

	/*
	 * Continue to fill physical nodes with fake nodes until there is no
	 * memory left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
			u64 start, limit, end;
			int phys_blk;

			phys_blk = emu_find_memblk_by_nid(i, pi);
			if (phys_blk < 0) {
				node_clear(i, physnode_mask);
				continue;
			}
			start = pi->blk[phys_blk].start;
			limit = pi->blk[phys_blk].end;
			end = start + size;

			if (nid < big)
				end += FAKE_NODE_MIN_SIZE;

			/*
			 * Continue to add memory to this fake node if its
			 * non-reserved memory is less than the per-node size.
			 */
			while (end - start - mem_hole_size(start, end) < size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > limit) {
					end = limit;
					break;
				}
			}

			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (limit - end - mem_hole_size(end, limit) < size)
				end = limit;

			ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes,
					       phys_blk,
					       min(end, limit) - start);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

/*
 * Returns the end address of a node so that there is at least `size' amount of
 * non-reserved memory or `max_addr' is reached.
 */
static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
	u64 end = start + size;

	while (end - start - mem_hole_size(start, end) < size) {
		end += FAKE_NODE_MIN_SIZE;
		if (end > max_addr) {
			end = max_addr;
			break;
		}
	}
	return end;
}

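/*
 * Per-node size for an even split of [@base, @max_addr) minus @hole bytes
 * across @nr_nodes nodes. The arithmetic is done in pages so that no 64-bit
 * division is needed on 32-bit builds.
 */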
static u64 uniform_size(u64 max_addr, u64 base, u64 hole, int nr_nodes)
{
	unsigned long max_pfn = PHYS_PFN(max_addr);
	unsigned long base_pfn = PHYS_PFN(base);
	unsigned long hole_pfns = PHYS_PFN(hole);

	return PFN_PHYS((max_pfn - base_pfn - hole_pfns) / nr_nodes);
}

/*
 * Sets up fake nodes of `size' interleaved over physical nodes ranging from
 * `addr' to `max_addr'.
 *
 * Returns the next unused emulated nid (i.e. the number of nodes set up so
 * far) on success, or negative on error.
 */
static int __init split_nodes_size_interleave_uniform(struct numa_meminfo *ei,
					      struct numa_meminfo *pi,
					      u64 addr, u64 max_addr, u64 size,
					      int nr_nodes, struct numa_memblk *pblk,
					      int nid)
{
	nodemask_t physnode_mask = numa_nodes_parsed;
	int i, ret, uniform = 0;
	u64 min_size;

	if ((!size && !nr_nodes) || (nr_nodes && !pblk))
		return -1;

	/*
	 * In the 'uniform' case split the passed in physical node by
	 * nr_nodes, in the non-uniform case, ignore the passed in
	 * physical block and try to create nodes of at least size
	 * @size.
	 *
	 * In the uniform case, split the nodes strictly by physical
	 * capacity, i.e. ignore holes. In the non-uniform case account
	 * for holes and treat @size as a minimum floor.
	 */
	if (!nr_nodes)
		nr_nodes = MAX_NUMNODES;
	else {
		nodes_clear(physnode_mask);
		node_set(pblk->nid, physnode_mask);
		uniform = 1;
	}

	if (uniform) {
		min_size = uniform_size(max_addr, addr, 0, nr_nodes);
		size = min_size;
	} else {
		/*
		 * The limit on emulated nodes is MAX_NUMNODES, so the
		 * size per node is increased accordingly if the
		 * requested size is too small. This creates a uniform
		 * distribution of node sizes across the entire machine
		 * (but not necessarily over physical nodes).
		 */
		min_size = uniform_size(max_addr, addr,
				mem_hole_size(addr, max_addr), nr_nodes);
	}
	min_size = ALIGN(max(min_size, FAKE_NODE_MIN_SIZE), FAKE_NODE_MIN_SIZE);
	if (size < min_size) {
		pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
			size >> 20, min_size >> 20);
		size = min_size;
	}
	size = ALIGN_DOWN(size, FAKE_NODE_MIN_SIZE);

	/*
	 * Fill physical nodes with fake nodes of size until there is no memory
	 * left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
			u64 start, limit, end;
			int phys_blk;

			phys_blk = emu_find_memblk_by_nid(i, pi);
			if (phys_blk < 0) {
				node_clear(i, physnode_mask);
				continue;
			}

			start = pi->blk[phys_blk].start;
			limit = pi->blk[phys_blk].end;

			if (uniform)
				end = start + size;
			else
				end = find_end_of_node(start, limit, size);
			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if ((limit - end - mem_hole_size(end, limit) < size)
			    && !uniform)
				end = limit;

			ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
					       phys_blk,
					       min(end, limit) - start);
			if (ret < 0)
				return ret;
		}
	}
	return nid;
}

static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
					      struct numa_meminfo *pi,
					      u64 addr, u64 max_addr, u64 size)
{
	return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size,
			0, NULL, 0);
}

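/*
 * Return the highest emulated nid that was actually set up and, via
 * @dfl_phys_nid, the physical nid that any still-unmapped emulated nids
 * should default to.
 */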
static int __init setup_emu2phys_nid(int *dfl_phys_nid)
{
	int i, max_emu_nid = 0;

	*dfl_phys_nid = NUMA_NO_NODE;
	for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) {
		if (emu_nid_to_phys[i] != NUMA_NO_NODE) {
			max_emu_nid = i;
			if (*dfl_phys_nid == NUMA_NO_NODE)
				*dfl_phys_nid = emu_nid_to_phys[i];
		}
	}

	return max_emu_nid;
}

/**
 * numa_emulation - Emulate NUMA nodes
 * @numa_meminfo: NUMA configuration to massage
 * @numa_dist_cnt: The size of the physical NUMA distance table
 *
 * Emulate NUMA nodes according to the numa=fake kernel parameter.
 * @numa_meminfo contains the physical memory configuration and is modified
 * to reflect the emulated configuration on success. @numa_dist_cnt is
 * used to determine the size of the physical distance table.
 *
 * On success, the following modifications are made.
 *
 * - @numa_meminfo is updated to reflect the emulated nodes.
 *
 * - __apicid_to_node[] is updated such that APIC IDs are mapped to the
 *   emulated nodes.
 *
 * - NUMA distance table is rebuilt to represent distances between emulated
 *   nodes. The distances are determined considering how emulated nodes
 *   are mapped to physical nodes and match the actual distances.
 *
 * - emu_nid_to_phys[] reflects how emulated nodes are mapped to physical
 *   nodes. This is used by numa_add_cpu() and numa_remove_cpu().
 *
 * If emulation is not enabled or fails, emu_nid_to_phys[] is filled with
 * identity mapping and no other modification is made.
 */
void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
{
	static struct numa_meminfo ei __initdata;
	static struct numa_meminfo pi __initdata;
	const u64 max_addr = PFN_PHYS(max_pfn);
	u8 *phys_dist = NULL;
	size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]);
	int max_emu_nid, dfl_phys_nid;
	int i, j, ret;

	if (!emu_cmdline)
		goto no_emu;

	memset(&ei, 0, sizeof(ei));
	pi = *numa_meminfo;

	for (i = 0; i < MAX_NUMNODES; i++)
		emu_nid_to_phys[i] = NUMA_NO_NODE;

	/*
	 * If the numa=fake command-line parameter contains a 'U', split each
	 * physical node into that many uniform emulated nodes. If it contains
	 * an 'M' or 'G', it specifies a fixed node size. Otherwise, if it is
	 * just a single number N, split the system RAM into N fake nodes.
	 */
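	/*
	 * Examples: "numa=fake=8" creates 8 interleaved nodes,
	 * "numa=fake=2G" creates nodes of 2GB each, and "numa=fake=4U"
	 * splits every physical node into 4 uniform emulated nodes.
	 */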
	if (strchr(emu_cmdline, 'U')) {
		nodemask_t physnode_mask = numa_nodes_parsed;
		unsigned long n;
		int nid = 0;

		n = simple_strtoul(emu_cmdline, &emu_cmdline, 0);
		ret = -1;
		for_each_node_mask(i, physnode_mask) {
			/*
			 * Always pass blk[0]: numa_remove_memblk_from(),
			 * called by emu_setup_memblk(), deletes entry 0 once
			 * it is consumed and shifts the remaining entries up
			 * in the pi.blk array, so the next block to split is
			 * always found at index 0.
			 */
			ret = split_nodes_size_interleave_uniform(&ei, &pi,
					pi.blk[0].start, pi.blk[0].end, 0,
					n, &pi.blk[0], nid);
			if (ret < 0)
				break;
			if (ret < n) {
				pr_info("%s: phys: %d only got %d of %ld nodes, failing\n",
						__func__, i, ret, n);
				ret = -1;
				break;
			}
			nid = ret;
		}
	} else if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
		u64 size;

		size = memparse(emu_cmdline, &emu_cmdline);
		ret = split_nodes_size_interleave(&ei, &pi, 0, max_addr, size);
	} else {
		unsigned long n;

		n = simple_strtoul(emu_cmdline, &emu_cmdline, 0);
		ret = split_nodes_interleave(&ei, &pi, 0, max_addr, n);
	}
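	/*
	 * A ':' may separate the node specification from an optional list of
	 * comma-separated inter-node distances, which get_option() consumes
	 * below when the emulated distance table is built.
	 */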
	if (*emu_cmdline == ':')
		emu_cmdline++;

	if (ret < 0)
		goto no_emu;

	if (numa_cleanup_meminfo(&ei) < 0) {
		pr_warn("NUMA: Warning: constructed meminfo invalid, disabling emulation\n");
		goto no_emu;
	}

	/* copy the physical distance table */
	if (numa_dist_cnt) {
		u64 phys;

		phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
					      phys_size, PAGE_SIZE);
		if (!phys) {
			pr_warn("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
			goto no_emu;
		}
		memblock_reserve(phys, phys_size);
		phys_dist = __va(phys);

		for (i = 0; i < numa_dist_cnt; i++)
			for (j = 0; j < numa_dist_cnt; j++)
				phys_dist[i * numa_dist_cnt + j] =
					node_distance(i, j);
	}

	/*
	 * Determine the max emulated nid and the default phys nid to use
	 * for unmapped nodes.
	 */
	max_emu_nid = setup_emu2phys_nid(&dfl_phys_nid);

	/* commit */
	*numa_meminfo = ei;

	/* Make sure numa_nodes_parsed only contains emulated nodes */
	nodes_clear(numa_nodes_parsed);
	for (i = 0; i < ARRAY_SIZE(ei.blk); i++)
		if (ei.blk[i].start != ei.blk[i].end &&
		    ei.blk[i].nid != NUMA_NO_NODE)
			node_set(ei.blk[i].nid, numa_nodes_parsed);

	/*
	 * Transform __apicid_to_node table to use emulated nids by
	 * reverse-mapping phys_nid. The maps should always exist but fall
	 * back to zero just in case.
	 */
	for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) {
		if (__apicid_to_node[i] == NUMA_NO_NODE)
			continue;
		for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++)
			if (__apicid_to_node[i] == emu_nid_to_phys[j])
				break;
		__apicid_to_node[i] = j < ARRAY_SIZE(emu_nid_to_phys) ? j : 0;
	}

	/* make sure all emulated nodes are mapped to a physical node */
	for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
		if (emu_nid_to_phys[i] == NUMA_NO_NODE)
			emu_nid_to_phys[i] = dfl_phys_nid;

	/* transform distance table */
	numa_reset_distance();
	for (i = 0; i < max_emu_nid + 1; i++) {
		for (j = 0; j < max_emu_nid + 1; j++) {
			int physi = emu_nid_to_phys[i];
			int physj = emu_nid_to_phys[j];
			int dist;

			if (get_option(&emu_cmdline, &dist) == 2)
				;
			else if (physi >= numa_dist_cnt || physj >= numa_dist_cnt)
				dist = physi == physj ?
					LOCAL_DISTANCE : REMOTE_DISTANCE;
			else
				dist = phys_dist[physi * numa_dist_cnt + physj];

			numa_set_distance(i, j, dist);
		}
	}

	/* free the copied physical distance table */
	if (phys_dist)
		memblock_free(__pa(phys_dist), phys_size);
	return;

no_emu:
	/* No emulation. Build identity emu_nid_to_phys[] for numa_add_cpu() */
	for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
		emu_nid_to_phys[i] = i;
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS
void numa_add_cpu(int cpu)
{
	int physnid, nid;

	nid = early_cpu_to_node(cpu);
	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));

	physnid = emu_nid_to_phys[nid];

	/*
	 * Map the cpu to each emulated node that is allocated on the physical
	 * node of the cpu's apic id.
	 */
	for_each_online_node(nid)
		if (emu_nid_to_phys[nid] == physnid)
			cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
}

void numa_remove_cpu(int cpu)
{
	int i;

	for_each_online_node(i)
		cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
}
#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
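/*
 * Debug variant: set or clear @cpu in the cpumask of every emulated node that
 * maps to the same physical node, going through debug_cpumask_set_cpu() for
 * the extra checking and reporting provided by CONFIG_DEBUG_PER_CPU_MAPS.
 */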
static void numa_set_cpumask(int cpu, bool enable)
{
	int nid, physnid;

	nid = early_cpu_to_node(cpu);
	if (nid == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}

	physnid = emu_nid_to_phys[nid];

	for_each_online_node(nid) {
		if (emu_nid_to_phys[nid] != physnid)
			continue;

		debug_cpumask_set_cpu(cpu, nid, enable);
	}
}

void numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */