// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per-CPU cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

#ifdef CONFIG_OF
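/*
 * On DT systems, fw_token holds the device_node of the cache (or of the
 * CPU node itself for level 1), so two leaves share a cache iff they
 * carry the same node pointer.
 */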
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->fw_token == this_leaf->fw_token;
}

/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

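/*
 * Map a cache type to its row in cache_type_info[]: unified caches use
 * the generic "cache-*" properties at index 0, while CACHE_TYPE_INST
 * and CACHE_TYPE_DATA index their "i-cache-*"/"d-cache-*" rows directly
 * by enum value.
 */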
static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}

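/*
 * Derive the number of ways from the properties read above:
 * ways = (size / nr_sets) / line_size. For example, a 32 KiB cache with
 * 128 sets of 64-byte lines is (32768 / 128) / 64 = 4-way.
 */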
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative (nr_sets == 1), there is no
	 * need to check the other properties.
	 */
	if (nr_sets > 1 && size > 0 && line_size > 0)
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level() must set up the cache level correctly,
	 * overriding the architecturally specified levels, so if the
	 * type is still NOCACHE at this stage, the node describes a
	 * unified cache.
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}

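/*
 * Walk the DT cache hierarchy for each leaf: level 1 leaves use the CPU
 * node itself, deeper levels follow the "next-level-cache" (or legacy
 * "l2-cache") phandle chain via of_find_next_cache_node(). Each visited
 * node is stashed in fw_token with its reference held.
 */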
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if fw_token is already populated */
	if (this_cpu_ci->info_list->fw_token)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np); /* cpu node itself */
		if (!np)
			break;
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT/ACPI systems, assume unique level 1 caches and
	 * system-wide shared caches for all other levels. This is used
	 * only if the arch specific code has not populated
	 * shared_cpu_map.
	 */
	return this_leaf->level != 1;
}
#endif

int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}

unsigned int coherency_max_size;

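/*
 * Populate fw_token from DT or ACPI as appropriate, then build each
 * leaf's shared_cpu_map by comparing it against the same-index leaf of
 * every other online CPU. The largest line size seen is tracked in
 * coherency_max_size.
 */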
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
		/* record the maximum cache line size */
		if (this_leaf->coherency_line_size > coherency_max_size)
			coherency_max_size = this_leaf->coherency_line_size;
	}

	return 0;
}

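/*
 * Undo cache_shared_cpu_map_setup() for a departing CPU: clear it from
 * every sibling's mask, clear the siblings from its own masks, and drop
 * the DT node references taken in cache_setup_of_node().
 */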
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		if (of_have_populated_dt())
			of_node_put(this_leaf->fw_token);
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

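/*
 * Weak arch hooks: init_cache_level() must fill in the number of cache
 * levels and leaves for the CPU, and populate_cache_leaves() fills in
 * the info_list entries. Architectures without cacheinfo support keep
 * these defaults, so detection below fails cleanly with -ENOENT.
 */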
int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	/*
	 * populate_cache_leaves() may completely set up the cache leaves
	 * and shared_cpu_map, or it may leave them partially set up.
	 */
	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for the cache hierarchy, fw_token and
	 * shared_cpu_map will be set up here only if they are not
	 * already populated.
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

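/*
 * show_one(file, member) expands to a sysfs show() routine printing a
 * single cacheinfo member; e.g. show_one(level, level) defines
 * level_show(), which DEVICE_ATTR_RO(level) below turns into the
 * read-only "level" attribute.
 */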
#define show_one(file_name, object)					\
static ssize_t file_name##_show(struct device *dev,			\
		struct device_attribute *attr, char *buf)		\
{									\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);		\
	return sysfs_emit(buf, "%u\n", this_leaf->object);		\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const char *output;

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		output = "Data";
		break;
	case CACHE_TYPE_INST:
		output = "Instruction";
		break;
	case CACHE_TYPE_UNIFIED:
		output = "Unified";
		break;
	default:
		return -EINVAL;
	}

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	const char *output;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		output = "ReadWriteAllocate";
	else if (ci_attr & CACHE_READ_ALLOCATE)
		output = "ReadAllocate";
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		output = "WriteAllocate";
	else
		return 0;

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sysfs_emit(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sysfs_emit(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

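/*
 * Hide attributes whose backing fields were never populated by the arch
 * or firmware: returning 0 here drops the file from sysfs entirely.
 * ways_of_associativity is keyed off size rather than its own value
 * because 0 ways legitimately means fully associative.
 */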
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* placeholder for the private group */
	NULL,
};

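/*
 * Weak hook that lets an architecture attach private attributes to a
 * leaf (x86, for example, overrides this for AMD L3-specific controls).
 * The single private slot in cache_private_groups[] is filled once on
 * first use and then reused for every subsequent leaf.
 */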
const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

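/*
 * Create the per-CPU "cache" directory, i.e.
 * /sys/devices/system/cpu/cpuX/cache, and allocate the array that holds
 * one child device per leaf (indexY).
 */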
static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

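/*
 * CPU hotplug callbacks: on online, detect the cache attributes and
 * expose them through sysfs; on pre-down, tear the sysfs nodes down and
 * free the attributes so a later online starts from a clean slate.
 */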
static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

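/*
 * Registering the hotplug state also runs cacheinfo_cpu_online() on
 * every CPU that is already up when this initcall executes.
 */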
static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
				 "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);