Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Intel Corporation.
 *
 * Heterogeneous Memory Attributes Table (HMAT) representation
 *
 * This program parses and reports the platform's HMAT tables, and registers
 * the applicable attributes with the node's interfaces.
 */

#define pr_fmt(fmt) "acpi/hmat: " fmt
#define dev_fmt(fmt) "acpi/hmat: " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/list_sort.h>
#include <linux/memregion.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/node.h>
#include <linux/sysfs.h>
#include <linux/dax.h>

static u8 hmat_revision;
static int hmat_disable __initdata;

void __init disable_hmat(void)
{
	hmat_disable = 1;
}

static LIST_HEAD(targets);
static LIST_HEAD(initiators);
static LIST_HEAD(localities);

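/*
 * target_lock serializes the one-time registration of each memory target
 * (the ->registered flag in hmat_register_target() below), which can be
 * reached both from hmat_init() and from the memory hotplug notifier.
 */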
static DEFINE_MUTEX(target_lock);

/*
 * The defined enum order is used to prioritize attributes to break ties when
 * selecting the best performing node.
 */
enum locality_types {
	WRITE_LATENCY,
	READ_LATENCY,
	WRITE_BANDWIDTH,
	READ_BANDWIDTH,
};

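/*
 * Indexed by enum locality_types; hmat_add_locality() records here the
 * locality subtable providing each attribute, with the combined "access"
 * data types filling both the read and write slots.
 */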
static struct memory_locality *localities_types[4];

struct target_cache {
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};

struct memory_target {
	struct list_head node;
	unsigned int memory_pxm;
	unsigned int processor_pxm;
	struct resource memregions;
	struct node_hmem_attrs hmem_attrs[2];
	struct list_head caches;
	struct node_cache_attrs cache_attrs;
	bool registered;
};

struct memory_initiator {
	struct list_head node;
	unsigned int processor_pxm;
	bool has_cpu;
};

struct memory_locality {
	struct list_head node;
	struct acpi_hmat_locality *hmat_loc;
};

static struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
{
	struct memory_initiator *initiator;

	list_for_each_entry(initiator, &initiators, node)
		if (initiator->processor_pxm == cpu_pxm)
			return initiator;
	return NULL;
}

static struct memory_target *find_mem_target(unsigned int mem_pxm)
{
	struct memory_target *target;

	list_for_each_entry(target, &targets, node)
		if (target->memory_pxm == mem_pxm)
			return target;
	return NULL;
}

static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{
	struct memory_initiator *initiator;

	if (pxm_to_node(cpu_pxm) == NUMA_NO_NODE)
		return;

	initiator = find_mem_initiator(cpu_pxm);
	if (initiator)
		return;

	initiator = kzalloc(sizeof(*initiator), GFP_KERNEL);
	if (!initiator)
		return;

	initiator->processor_pxm = cpu_pxm;
	initiator->has_cpu = node_state(pxm_to_node(cpu_pxm), N_CPU);
	list_add_tail(&initiator->node, &initiators);
}

static __init void alloc_memory_target(unsigned int mem_pxm,
		resource_size_t start, resource_size_t len)
{
	struct memory_target *target;

	target = find_mem_target(mem_pxm);
	if (!target) {
		target = kzalloc(sizeof(*target), GFP_KERNEL);
		if (!target)
			return;
		target->memory_pxm = mem_pxm;
		target->processor_pxm = PXM_INVAL;
		target->memregions = (struct resource) {
			.name	= "ACPI mem",
			.start	= 0,
			.end	= -1,
			.flags	= IORESOURCE_MEM,
		};
		list_add_tail(&target->node, &targets);
		INIT_LIST_HEAD(&target->caches);
	}

	/*
	 * There are potentially multiple ranges per PXM, so record each
	 * in the per-target memregions resource tree.
	 */
	if (!__request_region(&target->memregions, start, len, "memory target",
				IORESOURCE_MEM))
		pr_warn("failed to reserve %#llx - %#llx in pxm: %d\n",
				start, start + len, mem_pxm);
}

static __init const char *hmat_data_type(u8 type)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		return "Access Latency";
	case ACPI_HMAT_READ_LATENCY:
		return "Read Latency";
	case ACPI_HMAT_WRITE_LATENCY:
		return "Write Latency";
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		return "Access Bandwidth";
	case ACPI_HMAT_READ_BANDWIDTH:
		return "Read Bandwidth";
	case ACPI_HMAT_WRITE_BANDWIDTH:
		return "Write Bandwidth";
	default:
		return "Reserved";
	}
}

static __init const char *hmat_data_type_suffix(u8 type)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		return " nsec";
	case ACPI_HMAT_ACCESS_BANDWIDTH:
	case ACPI_HMAT_READ_BANDWIDTH:
	case ACPI_HMAT_WRITE_BANDWIDTH:
		return " MB/s";
	default:
		return "";
	}
}

static u32 hmat_normalize(u16 entry, u64 base, u8 type)
{
	u32 value;

	/*
	 * Check for invalid and overflow values
	 */
	if (entry == 0xffff || !entry)
		return 0;
	else if (base > (UINT_MAX / (entry)))
		return 0;

	/*
	 * Divide by the base unit for version 1, convert latency from
	 * picoseconds to nanoseconds if revision 2.
	 */
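	/*
	 * Example (hypothetical table values): with entry_base_unit = 100
	 * and entry = 5, a revision 1 table reports 5 * 100 / 10 = 50; a
	 * revision 2 latency entry means 5 * 100 = 500 picoseconds, which
	 * is reported as DIV_ROUND_UP(500, 1000) = 1 nsec.
	 */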
	value = entry * base;
	if (hmat_revision == 1) {
		if (value < 10)
			return 0;
		value = DIV_ROUND_UP(value, 10);
	} else if (hmat_revision == 2) {
		switch (type) {
		case ACPI_HMAT_ACCESS_LATENCY:
		case ACPI_HMAT_READ_LATENCY:
		case ACPI_HMAT_WRITE_LATENCY:
			value = DIV_ROUND_UP(value, 1000);
			break;
		default:
			break;
		}
	}
	return value;
}

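/*
 * Record one performance attribute for the given access class. Access
 * class 0 tracks the best performance from any initiator; access class 1
 * is restricted to initiator nodes that have CPUs (see "Access 1 ignores
 * Generic Initiators" below).
 */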
static void hmat_update_target_access(struct memory_target *target,
				      u8 type, u32 value, int access)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		target->hmem_attrs[access].read_latency = value;
		target->hmem_attrs[access].write_latency = value;
		break;
	case ACPI_HMAT_READ_LATENCY:
		target->hmem_attrs[access].read_latency = value;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		target->hmem_attrs[access].write_latency = value;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		target->hmem_attrs[access].read_bandwidth = value;
		target->hmem_attrs[access].write_bandwidth = value;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		target->hmem_attrs[access].read_bandwidth = value;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		target->hmem_attrs[access].write_bandwidth = value;
		break;
	default:
		break;
	}
}

static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
{
	struct memory_locality *loc;

	loc = kzalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		pr_notice_once("Failed to allocate HMAT locality\n");
		return;
	}

	loc->hmat_loc = hmat_loc;
	list_add_tail(&loc->node, &localities);

	switch (hmat_loc->data_type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		localities_types[READ_LATENCY] = loc;
		localities_types[WRITE_LATENCY] = loc;
		break;
	case ACPI_HMAT_READ_LATENCY:
		localities_types[READ_LATENCY] = loc;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		localities_types[WRITE_LATENCY] = loc;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		localities_types[READ_BANDWIDTH] = loc;
		localities_types[WRITE_BANDWIDTH] = loc;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		localities_types[READ_BANDWIDTH] = loc;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		localities_types[WRITE_BANDWIDTH] = loc;
		break;
	default:
		break;
	}
}

static __init int hmat_parse_locality(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_hmat_locality *hmat_loc = (void *)header;
	struct memory_target *target;
	unsigned int init, targ, total_size, ipds, tpds;
	u32 *inits, *targs, value;
	u16 *entries;
	u8 type, mem_hier;

	if (hmat_loc->header.length < sizeof(*hmat_loc)) {
		pr_notice("HMAT: Unexpected locality header length: %u\n",
			 hmat_loc->header.length);
		return -EINVAL;
	}

	type = hmat_loc->data_type;
	mem_hier = hmat_loc->flags & ACPI_HMAT_MEMORY_HIERARCHY;
	ipds = hmat_loc->number_of_initiator_Pds;
	tpds = hmat_loc->number_of_target_Pds;
	total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
		     sizeof(*inits) * ipds + sizeof(*targs) * tpds;
	if (hmat_loc->header.length < total_size) {
		pr_notice("HMAT: Unexpected locality header length:%u, minimum required:%u\n",
			 hmat_loc->header.length, total_size);
		return -EINVAL;
	}

	pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
		hmat_loc->flags, hmat_data_type(type), ipds, tpds,
		hmat_loc->entry_base_unit);

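	/*
	 * The body that follows the fixed header is laid out as: ipds
	 * initiator proximity domains (u32), then tpds target proximity
	 * domains (u32), then an ipds x tpds matrix of u16 entries in
	 * initiator-major order, i.e. entries[init * tpds + targ].
	 */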
	inits = (u32 *)(hmat_loc + 1);
	targs = inits + ipds;
	entries = (u16 *)(targs + tpds);
	for (init = 0; init < ipds; init++) {
		alloc_memory_initiator(inits[init]);
		for (targ = 0; targ < tpds; targ++) {
			value = hmat_normalize(entries[init * tpds + targ],
					       hmat_loc->entry_base_unit,
					       type);
			pr_info("  Initiator-Target[%u-%u]:%u%s\n",
				inits[init], targs[targ], value,
				hmat_data_type_suffix(type));

			if (mem_hier == ACPI_HMAT_MEMORY) {
				target = find_mem_target(targs[targ]);
				if (target && target->processor_pxm == inits[init]) {
					hmat_update_target_access(target, type, value, 0);
					/* If the node has a CPU, update access 1 */
					if (node_state(pxm_to_node(inits[init]), N_CPU))
						hmat_update_target_access(target, type, value, 1);
				}
			}
		}
	}

	if (mem_hier == ACPI_HMAT_MEMORY)
		hmat_add_locality(hmat_loc);

	return 0;
}

static __init int hmat_parse_cache(union acpi_subtable_headers *header,
				   const unsigned long end)
{
	struct acpi_hmat_cache *cache = (void *)header;
	struct memory_target *target;
	struct target_cache *tcache;
	u32 attrs;

	if (cache->header.length < sizeof(*cache)) {
		pr_notice("HMAT: Unexpected cache header length: %u\n",
			 cache->header.length);
		return -EINVAL;
	}

	attrs = cache->cache_attributes;
	pr_info("HMAT: Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
		cache->memory_PD, cache->cache_size, attrs,
		cache->number_of_SMBIOShandles);

	target = find_mem_target(cache->memory_PD);
	if (!target)
		return 0;

	tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
	if (!tcache) {
		pr_notice_once("Failed to allocate HMAT cache info\n");
		return 0;
	}

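	/*
	 * cache_attributes is a packed bitfield; the shifts below extract
	 * the cache level (bits 4-7), associativity (bits 8-11), write
	 * policy (bits 12-15) and line size (bits 16-31) via the
	 * corresponding ACPI_HMAT_* masks.
	 */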
	tcache->cache_attrs.size = cache->cache_size;
	tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
	tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;

	switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
	case ACPI_HMAT_CA_DIRECT_MAPPED:
		tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
		break;
	case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
		tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
		break;
	case ACPI_HMAT_CA_NONE:
	default:
		tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
		break;
	}

	switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
	case ACPI_HMAT_CP_WB:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
		break;
	case ACPI_HMAT_CP_WT:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
		break;
	case ACPI_HMAT_CP_NONE:
	default:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
		break;
	}
	list_add_tail(&tcache->node, &target->caches);

	return 0;
}

static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *header,
					      const unsigned long end)
{
	struct acpi_hmat_proximity_domain *p = (void *)header;
	struct memory_target *target = NULL;

	if (p->header.length != sizeof(*p)) {
		pr_notice("HMAT: Unexpected address range header length: %u\n",
			 p->header.length);
		return -EINVAL;
	}

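	/*
	 * Revision 1 of this structure carried the target's physical
	 * address range in fields that ACPICA now exposes as
	 * reserved3/reserved4, so that range is only meaningful to print
	 * for revision 1 tables.
	 */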
	if (hmat_revision == 1)
		pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
			p->reserved3, p->reserved4, p->flags, p->processor_PD,
			p->memory_PD);
	else
		pr_info("HMAT: Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
			p->flags, p->processor_PD, p->memory_PD);

	if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) ||
	    hmat_revision > 1) {
		target = find_mem_target(p->memory_PD);
		if (!target) {
			pr_debug("HMAT: Memory Domain missing from SRAT\n");
			return -EINVAL;
		}
	}
	if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) {
		int p_node = pxm_to_node(p->processor_PD);

		if (p_node == NUMA_NO_NODE) {
			pr_debug("HMAT: Invalid Processor Domain\n");
			return -EINVAL;
		}
		target->processor_pxm = p->processor_PD;
	}

	return 0;
}

static int __init hmat_parse_subtable(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_hmat_structure *hdr = (void *)header;

	if (!hdr)
		return -EINVAL;

	switch (hdr->type) {
	case ACPI_HMAT_TYPE_PROXIMITY:
		return hmat_parse_proximity_domain(header, end);
	case ACPI_HMAT_TYPE_LOCALITY:
		return hmat_parse_locality(header, end);
	case ACPI_HMAT_TYPE_CACHE:
		return hmat_parse_cache(header, end);
	default:
		return -EINVAL;
	}
}

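/*
 * SRAT memory affinity entries seed the target list and its address
 * ranges before any HMAT subtable is parsed; hmat_init() below walks
 * SRAT first for exactly this reason.
 */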
static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_srat_mem_affinity *ma = (void *)header;

	if (!ma)
		return -EINVAL;
	if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
		return 0;
	alloc_memory_target(ma->proximity_domain, ma->base_address, ma->length);
	return 0;
}

static u32 hmat_initiator_perf(struct memory_target *target,
			       struct memory_initiator *initiator,
			       struct acpi_hmat_locality *hmat_loc)
{
	unsigned int ipds, tpds, i, idx = 0, tdx = 0;
	u32 *inits, *targs;
	u16 *entries;

	ipds = hmat_loc->number_of_initiator_Pds;
	tpds = hmat_loc->number_of_target_Pds;
	inits = (u32 *)(hmat_loc + 1);
	targs = inits + ipds;
	entries = (u16 *)(targs + tpds);

	for (i = 0; i < ipds; i++) {
		if (inits[i] == initiator->processor_pxm) {
			idx = i;
			break;
		}
	}

	if (i == ipds)
		return 0;

	for (i = 0; i < tpds; i++) {
		if (targs[i] == target->memory_pxm) {
			tdx = i;
			break;
		}
	}
	if (i == tpds)
		return 0;

	return hmat_normalize(entries[idx * tpds + tdx],
			      hmat_loc->entry_base_unit,
			      hmat_loc->data_type);
}

static bool hmat_update_best(u8 type, u32 value, u32 *best)
{
	bool updated = false;

	if (!value)
		return false;

	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		if (!*best || *best > value) {
			*best = value;
			updated = true;
		}
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
	case ACPI_HMAT_READ_BANDWIDTH:
	case ACPI_HMAT_WRITE_BANDWIDTH:
		if (!*best || *best < value) {
			*best = value;
			updated = true;
		}
		break;
	}

	return updated;
}

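/*
 * Sort initiators by proximity domain. As a deliberate side effect,
 * every visited initiator's PXM is set in the p_nodes bitmap passed in
 * via priv, priming the candidate mask that
 * hmat_register_target_initiators() then winnows down.
 */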
static int initiator_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct memory_initiator *ia;
	struct memory_initiator *ib;
	unsigned long *p_nodes = priv;

	ia = list_entry(a, struct memory_initiator, node);
	ib = list_entry(b, struct memory_initiator, node);

	set_bit(ia->processor_pxm, p_nodes);
	set_bit(ib->processor_pxm, p_nodes);

	return ia->processor_pxm - ib->processor_pxm;
}

static void hmat_register_target_initiators(struct memory_target *target)
{
	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
	struct memory_initiator *initiator;
	unsigned int mem_nid, cpu_nid;
	struct memory_locality *loc = NULL;
	u32 best = 0;
	bool access0done = false;
	int i;

	mem_nid = pxm_to_node(target->memory_pxm);
	/*
	 * If the Address Range Structure provides a local processor pxm, link
	 * only that one. Otherwise, find the best performance attributes and
	 * register all initiators that match.
	 */
	if (target->processor_pxm != PXM_INVAL) {
		cpu_nid = pxm_to_node(target->processor_pxm);
		register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
		access0done = true;
		if (node_state(cpu_nid, N_CPU)) {
			register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
			return;
		}
	}

	if (list_empty(&localities))
		return;

	/*
	 * We need the initiator list sorted so we can use bitmap_clear for
	 * previously set initiators when we find a better memory accessor.
	 * We'll also use the sorting to prime the candidate nodes with known
	 * initiators.
	 */
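	/*
	 * The winnowing relies on the ascending-PXM sort: when a strictly
	 * better value is found, every lower PXM is cleared with a single
	 * bitmap_clear(), while initiators that merely tie the current
	 * best keep their bit set.
	 */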
	bitmap_zero(p_nodes, MAX_NUMNODES);
	list_sort(p_nodes, &initiators, initiator_cmp);
	if (!access0done) {
		for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
			loc = localities_types[i];
			if (!loc)
				continue;

			best = 0;
			list_for_each_entry(initiator, &initiators, node) {
				u32 value;

				if (!test_bit(initiator->processor_pxm, p_nodes))
					continue;

				value = hmat_initiator_perf(target, initiator,
							    loc->hmat_loc);
				if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
					bitmap_clear(p_nodes, 0, initiator->processor_pxm);
				if (value != best)
					clear_bit(initiator->processor_pxm, p_nodes);
			}
			if (best)
				hmat_update_target_access(target, loc->hmat_loc->data_type,
							  best, 0);
		}

		for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
			cpu_nid = pxm_to_node(i);
			register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
		}
	}

	/* Access 1 ignores Generic Initiators */
	bitmap_zero(p_nodes, MAX_NUMNODES);
	list_sort(p_nodes, &initiators, initiator_cmp);
	best = 0;
	for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
		loc = localities_types[i];
		if (!loc)
			continue;

		best = 0;
		list_for_each_entry(initiator, &initiators, node) {
			u32 value;

			if (!initiator->has_cpu) {
				clear_bit(initiator->processor_pxm, p_nodes);
				continue;
			}
			if (!test_bit(initiator->processor_pxm, p_nodes))
				continue;

			value = hmat_initiator_perf(target, initiator, loc->hmat_loc);
			if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
				bitmap_clear(p_nodes, 0, initiator->processor_pxm);
			if (value != best)
				clear_bit(initiator->processor_pxm, p_nodes);
		}
		if (best)
			hmat_update_target_access(target, loc->hmat_loc->data_type, best, 1);
	}
	for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
		cpu_nid = pxm_to_node(i);
		register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
	}
}

static void hmat_register_target_cache(struct memory_target *target)
{
	unsigned int mem_nid = pxm_to_node(target->memory_pxm);
	struct target_cache *tcache;

	list_for_each_entry(tcache, &target->caches, node)
		node_add_cache(mem_nid, &tcache->cache_attrs);
}

static void hmat_register_target_perf(struct memory_target *target, int access)
{
	unsigned int mem_nid = pxm_to_node(target->memory_pxm);

	node_set_perf_attrs(mem_nid, &target->hmem_attrs[access], access);
}

static void hmat_register_target_devices(struct memory_target *target)
{
	struct resource *res;

	/*
	 * Do not bother creating devices if no driver is available to
	 * consume them.
	 */
	if (!IS_ENABLED(CONFIG_DEV_DAX_HMEM))
		return;

	for (res = target->memregions.child; res; res = res->sibling) {
		int target_nid = pxm_to_node(target->memory_pxm);

		hmem_register_device(target_nid, res);
	}
}

static void hmat_register_target(struct memory_target *target)
{
	int nid = pxm_to_node(target->memory_pxm);

	/*
	 * Devices may belong to either an offline or online
	 * node, so unconditionally add them.
	 */
	hmat_register_target_devices(target);

	/*
	 * Skip offline nodes. This can happen when memory
	 * marked EFI_MEMORY_SP, "specific purpose", is applied
	 * to all the memory in a proximity domain leading to
	 * the node being marked offline / unplugged, or if a
	 * memory-only "hotplug" node is offline.
	 */
	if (nid == NUMA_NO_NODE || !node_online(nid))
		return;

	mutex_lock(&target_lock);
	if (!target->registered) {
		hmat_register_target_initiators(target);
		hmat_register_target_cache(target);
		hmat_register_target_perf(target, 0);
		hmat_register_target_perf(target, 1);
		target->registered = true;
	}
	mutex_unlock(&target_lock);
}

static void hmat_register_targets(void)
{
	struct memory_target *target;

	list_for_each_entry(target, &targets, node)
		hmat_register_target(target);
}

static int hmat_callback(struct notifier_block *self,
			 unsigned long action, void *arg)
{
	struct memory_target *target;
	struct memory_notify *mnb = arg;
	int pxm, nid = mnb->status_change_nid;

	if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
		return NOTIFY_OK;

	pxm = node_to_pxm(nid);
	target = find_mem_target(pxm);
	if (!target)
		return NOTIFY_OK;

	hmat_register_target(target);
	return NOTIFY_OK;
}

static struct notifier_block hmat_callback_nb = {
	.notifier_call = hmat_callback,
	.priority = 2,
};

static __init void hmat_free_structures(void)
{
	struct memory_target *target, *tnext;
	struct memory_locality *loc, *lnext;
	struct memory_initiator *initiator, *inext;
	struct target_cache *tcache, *cnext;

	list_for_each_entry_safe(target, tnext, &targets, node) {
		struct resource *res, *res_next;

		list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
			list_del(&tcache->node);
			kfree(tcache);
		}

		list_del(&target->node);
		res = target->memregions.child;
		while (res) {
			res_next = res->sibling;
			__release_region(&target->memregions, res->start,
					resource_size(res));
			res = res_next;
		}
		kfree(target);
	}

	list_for_each_entry_safe(initiator, inext, &initiators, node) {
		list_del(&initiator->node);
		kfree(initiator);
	}

	list_for_each_entry_safe(loc, lnext, &localities, node) {
		list_del(&loc->node);
		kfree(loc);
	}
}

static __init int hmat_init(void)
{
	struct acpi_table_header *tbl;
	enum acpi_hmat_type i;
	acpi_status status;

	if (srat_disabled() || hmat_disable)
		return 0;

	status = acpi_get_table(ACPI_SIG_SRAT, 0, &tbl);
	if (ACPI_FAILURE(status))
		return 0;

	if (acpi_table_parse_entries(ACPI_SIG_SRAT,
				sizeof(struct acpi_table_srat),
				ACPI_SRAT_TYPE_MEMORY_AFFINITY,
				srat_parse_mem_affinity, 0) < 0)
		goto out_put;
	acpi_put_table(tbl);

	status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
	if (ACPI_FAILURE(status))
		goto out_put;

	hmat_revision = tbl->revision;
	switch (hmat_revision) {
	case 1:
	case 2:
		break;
	default:
		pr_notice("Ignoring HMAT: Unknown revision:%d\n", hmat_revision);
		goto out_put;
	}

	for (i = ACPI_HMAT_TYPE_PROXIMITY; i < ACPI_HMAT_TYPE_RESERVED; i++) {
		if (acpi_table_parse_entries(ACPI_SIG_HMAT,
					     sizeof(struct acpi_table_hmat), i,
					     hmat_parse_subtable, 0) < 0) {
			pr_notice("Ignoring HMAT: Invalid table\n");
			goto out_put;
		}
	}
	hmat_register_targets();

	/* Keep the table and structures if the notifier may use them */
	if (!register_hotmemory_notifier(&hmat_callback_nb))
		return 0;
out_put:
	hmat_free_structures();
	acpi_put_table(tbl);
	return 0;
}
device_initcall(hmat_init);