^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2017 SiFive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <asm/cacheinfo.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
/* Optional SoC-specific cacheinfo hooks, installed via riscv_set_cacheinfo_ops(). */
static struct riscv_cacheinfo_ops *rv_cache_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
/*
 * riscv_set_cacheinfo_ops() - register platform-specific cacheinfo hooks.
 * @ops: hook table; must remain valid for as long as it stays registered.
 *
 * The registered ops are consulted by cache_get_priv_group() to expose
 * extra per-leaf sysfs attributes.  No locking: callers are expected to
 * register once during platform init, before the attributes are read.
 */
void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops)
{
	rv_cache_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_cacheinfo_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) const struct attribute_group *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) cache_get_priv_group(struct cacheinfo *this_leaf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) if (rv_cache_ops && rv_cache_ops->get_priv_group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) return rv_cache_ops->get_priv_group(this_leaf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) static struct cacheinfo *get_cacheinfo(u32 level, enum cache_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * Using raw_smp_processor_id() elides a preemptability check, but this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * is really indicative of a larger problem: the cacheinfo UABI assumes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * that cores have a homonogenous view of the cache hierarchy. That
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * happens to be the case for the current set of RISC-V systems, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * likely won't be true in general. Since there's no way to provide
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * correct information for these systems via the current UABI we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * just eliding the check for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(raw_smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) struct cacheinfo *this_leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) for (index = 0; index < this_cpu_ci->num_leaves; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) this_leaf = this_cpu_ci->info_list + index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) if (this_leaf->level == level && this_leaf->type == type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) return this_leaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) uintptr_t get_cache_size(u32 level, enum cache_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) struct cacheinfo *this_leaf = get_cacheinfo(level, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) return this_leaf ? this_leaf->size : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) uintptr_t get_cache_geometry(u32 level, enum cache_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) struct cacheinfo *this_leaf = get_cacheinfo(level, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) return this_leaf ? (this_leaf->ways_of_associativity << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) this_leaf->coherency_line_size) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) static void ci_leaf_init(struct cacheinfo *this_leaf, enum cache_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) unsigned int level, unsigned int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) unsigned int sets, unsigned int line_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) this_leaf->level = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) this_leaf->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) this_leaf->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) this_leaf->number_of_sets = sets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) this_leaf->coherency_line_size = line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) * If the cache is fully associative, there is no need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * check the other properties.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) if (sets == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * Set the ways number for n-ways associative, make sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * all properties are big than zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) if (sets > 0 && size > 0 && line_size > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) this_leaf->ways_of_associativity = (size / sets) / line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) static void fill_cacheinfo(struct cacheinfo **this_leaf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) struct device_node *node, unsigned int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) unsigned int size, sets, line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) if (!of_property_read_u32(node, "cache-size", &size) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) !of_property_read_u32(node, "cache-block-size", &line_size) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) !of_property_read_u32(node, "cache-sets", &sets)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) ci_leaf_init((*this_leaf)++, CACHE_TYPE_UNIFIED, level, size, sets, line_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) if (!of_property_read_u32(node, "i-cache-size", &size) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) !of_property_read_u32(node, "i-cache-sets", &sets) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) !of_property_read_u32(node, "i-cache-block-size", &line_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) ci_leaf_init((*this_leaf)++, CACHE_TYPE_INST, level, size, sets, line_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) if (!of_property_read_u32(node, "d-cache-size", &size) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) !of_property_read_u32(node, "d-cache-sets", &sets) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) !of_property_read_u32(node, "d-cache-block-size", &line_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) ci_leaf_init((*this_leaf)++, CACHE_TYPE_DATA, level, size, sets, line_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) int init_cache_level(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) struct device_node *np = of_cpu_device_node_get(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) struct device_node *prev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) int levels = 0, leaves = 0, level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) if (of_property_read_bool(np, "cache-size"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) ++leaves;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) if (of_property_read_bool(np, "i-cache-size"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) ++leaves;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) if (of_property_read_bool(np, "d-cache-size"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) ++leaves;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) if (leaves > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) levels = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) prev = np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) while ((np = of_find_next_cache_node(np))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) of_node_put(prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) prev = np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) if (!of_device_is_compatible(np, "cache"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) if (of_property_read_u32(np, "cache-level", &level))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) if (level <= levels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) if (of_property_read_bool(np, "cache-size"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) ++leaves;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) if (of_property_read_bool(np, "i-cache-size"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) ++leaves;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) if (of_property_read_bool(np, "d-cache-size"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) ++leaves;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) levels = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) this_cpu_ci->num_levels = levels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) this_cpu_ci->num_leaves = leaves;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) int populate_cache_leaves(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) struct cacheinfo *this_leaf = this_cpu_ci->info_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) struct device_node *np = of_cpu_device_node_get(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) struct device_node *prev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) int levels = 1, level = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) /* Level 1 caches in cpu node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) fill_cacheinfo(&this_leaf, np, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) /* Next level caches in cache nodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) prev = np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) while ((np = of_find_next_cache_node(np))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) of_node_put(prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) prev = np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) if (!of_device_is_compatible(np, "cache"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) if (of_property_read_u32(np, "cache-level", &level))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) if (level <= levels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) fill_cacheinfo(&this_leaf, np, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) levels = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) }