Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

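arch/powerpc/kernel/cacheinfo.c (git blame view; all lines from commit 8f3ce5b39 by kx, 2023-10-28 12:00:06 +0300):
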
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 */

#define pr_fmt(fmt) "cacheinfo: " fmt

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties.  According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA        3

static const struct cache_type_info cache_type_info[] = {
	{
		/* Embedded systems that use cache-size, cache-block-size,
		 * etc. for the Unified (typically L2) cache. */
		.name            = "Unified",
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	},
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %pOFP(%s) refers to cache for %pOFP(%s)\n",
			  iter->ofnode,
			  cache_type_string(iter),
			  cache->ofnode,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %pOFP\n", cache->level,
		 cache_type_string(cache), cache->ofnode);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %pOFP(%s)\n",
			  cpu, next->ofnode,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

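/* Read this cache's size in bytes from the type-specific OF size
 * property (a single 32-bit cell).
 */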
static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}

static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

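	/* ways = size / (nr_sets * line_size), i.e. lines per set */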
	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}

/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED ||
	    cache->type == CACHE_TYPE_UNIFIED_D)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

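/* A unified cache is indicated by the mere presence of the
 * "cache-unified" property; its value is not examined.
 */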
static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

/*
 * Unified caches can have two different sets of tags.  Most embedded
 * use cache-size, etc. for the unified cache size, but open firmware systems
 * use d-cache-size, etc.   Check on initialization for which type we have, and
 * return the appropriate structure type.  Assume it's embedded if it isn't
 * open firmware.  If it's yet a 3rd type, then there will be missing entries
 * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
 * to be extended further.
 */
static int cache_is_unified_d(const struct device_node *np)
{
	return of_get_property(np,
		cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}

static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
{
	pr_debug("creating L%d ucache for %pOFP\n", level, node);

	return new_cache(cache_is_unified_d(node), level, node);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %pOFP\n", level,
		 node);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

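	/* Head the local list with the data cache so it reads
	 * L1d -> L1i, matching the struct cache comment above.
	 */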
	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;

	/*
	 * The cache->next_local list sorts by level ascending:
	 * L1d -> L1i -> L2 -> L3 ...
	 */
	WARN_ONCE((smaller->level == 1 && bigger->level > 2) ||
		  (smaller->level > 1 && bigger->level != smaller->level + 1),
		  "linking L%i cache %pOFP to L%i cache %pOFP; skipped a level?\n",
		  smaller->level, smaller->ofnode, bigger->level, bigger->ofnode);
}

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ONCE(cache->level != 1,
		  "instantiating cache chain from L%d %s cache for "
		  "%pOFP instead of an L1\n", cache->level,
		  cache_type_string(cache), cache->ofnode);
	WARN_ONCE(!of_node_is_type(cache->ofnode, "cpu"),
		  "instantiating cache chain from node %pOFP of type '%s' "
		  "instead of a cpu node\n", cache->ofnode,
		  of_node_get_device_type(cache->ofnode));
}

static void do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

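	/* Walk the chain of next-level caches; of_find_next_cache_node()
	 * typically follows the node's l2-cache/next-level-cache phandle.
	 */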
	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}

static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
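	/* kobject_put() tolerates a NULL kobj, so this covers every
	 * error path above.
	 */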
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

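/* sysfs ->show dispatch for index directories: forward reads to the
 * embedded kobj_attribute's own show() method.
 */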
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	struct kobj_attribute *kobj_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	kobj_attr = container_of(attr, struct kobj_attribute, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 	return kobj_attr->show(k, kobj_attr, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) static struct cache *index_kobj_to_cache(struct kobject *k)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 	struct cache_index_dir *index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 	index = kobj_to_cache_index_dir(k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 	return index->cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	unsigned int size_kb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 	struct cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 	cache = index_kobj_to_cache(k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	if (cache_size_kb(cache, &size_kb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	return sprintf(buf, "%uK\n", size_kb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) static struct kobj_attribute cache_size_attr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 	__ATTR(size, 0444, size_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 	unsigned int line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 	struct cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 	cache = index_kobj_to_cache(k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 	if (cache_get_line_size(cache, &line_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 	return sprintf(buf, "%u\n", line_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) static struct kobj_attribute cache_line_size_attr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 	__ATTR(coherency_line_size, 0444, line_size_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 	unsigned int nr_sets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 	struct cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	cache = index_kobj_to_cache(k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 	if (cache_nr_sets(cache, &nr_sets))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 	return sprintf(buf, "%u\n", nr_sets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) static struct kobj_attribute cache_nr_sets_attr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 	unsigned int associativity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 	struct cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 	cache = index_kobj_to_cache(k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	if (cache_associativity(cache, &associativity))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	return sprintf(buf, "%u\n", associativity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) static struct kobj_attribute cache_assoc_attr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 	struct cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 	cache = index_kobj_to_cache(k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 	return sprintf(buf, "%s\n", cache_type_string(cache));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) static struct kobj_attribute cache_type_attr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	__ATTR(type, 0444, type_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	struct cache_index_dir *index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 	struct cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 	index = kobj_to_cache_index_dir(k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 	cache = index->cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 	return sprintf(buf, "%d\n", cache->level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) static struct kobj_attribute cache_level_attr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 	__ATTR(level, 0444, level_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) static unsigned int index_dir_to_cpu(struct cache_index_dir *index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 	struct kobject *index_dir_kobj = &index->kobj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 	struct kobject *cache_dir_kobj = index_dir_kobj->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 	struct kobject *cpu_dev_kobj = cache_dir_kobj->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 	struct device *dev = kobj_to_dev(cpu_dev_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 	return dev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)  * On big-core systems, each core has two groups of CPUs each of which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)  * has its own L1-cache. The thread-siblings which share l1-cache with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)  * @cpu can be obtained via cpu_smallcore_mask().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) static const struct cpumask *get_big_core_shared_cpu_map(int cpu, struct cache *cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 	if (cache->level == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 		return cpu_smallcore_mask(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 	return &cache->shared_cpu_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) show_shared_cpumap(struct kobject *k, struct kobj_attribute *attr, char *buf, bool list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 	struct cache_index_dir *index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 	struct cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 	const struct cpumask *mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 	index = kobj_to_cache_index_dir(k);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 	cache = index->cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 	if (has_big_cores) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 		cpu = index_dir_to_cpu(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 		mask = get_big_core_shared_cpu_map(cpu, cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 		mask  = &cache->shared_cpu_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	return cpumap_print_to_pagebuf(list, buf, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 	return show_shared_cpumap(k, attr, buf, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) static ssize_t shared_cpu_list_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 	return show_shared_cpumap(k, attr, buf, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) static struct kobj_attribute cache_shared_cpu_map_attr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) static struct kobj_attribute cache_shared_cpu_list_attr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) /* Attributes which should always be created -- the kobject/sysfs core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)  * does this automatically via kobj_type->default_attrs.  This is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706)  * minimum data required to uniquely identify a cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) static struct attribute *cache_index_default_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 	&cache_type_attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 	&cache_level_attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 	&cache_shared_cpu_map_attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 	&cache_shared_cpu_list_attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) /* Attributes which should be created if the cache device node has the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)  * right properties -- see cacheinfo_create_index_opt_attrs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) static struct kobj_attribute *cache_index_opt_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 	&cache_size_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 	&cache_line_size_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 	&cache_nr_sets_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 	&cache_assoc_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) static const struct sysfs_ops cache_index_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 	.show = cache_index_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) static struct kobj_type cache_index_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 	.release = cache_index_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 	.sysfs_ops = &cache_index_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 	.default_attrs = cache_index_default_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 	const char *cache_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 	struct cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 	char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 	if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 	cache = dir->cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 	cache_type = cache_type_string(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 	/* We don't want to create an attribute that can't provide a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	 * meaningful value.  Check the return value of each optional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 	 * attribute's ->show method before registering the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 	 * attribute.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 		struct kobj_attribute *attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 		ssize_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 		attr = cache_index_opt_attrs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 		rc = attr->show(&dir->kobj, attr, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 		if (rc <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 			pr_debug("not creating %s attribute for "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 				 "%pOFP(%s) (rc = %zd)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 				 attr->attr.name, cache->ofnode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 				 cache_type, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 		if (sysfs_create_file(&dir->kobj, &attr->attr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 			pr_debug("could not create %s attribute for %pOFP(%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 				 attr->attr.name, cache->ofnode, cache_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) static void cacheinfo_create_index_dir(struct cache *cache, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 				       struct cache_dir *cache_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 	struct cache_index_dir *index_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 	if (!index_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 	index_dir->cache = cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 				  cache_dir->kobj, "index%d", index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 		kobject_put(&index_dir->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 	index_dir->next = cache_dir->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 	cache_dir->index = index_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 	cacheinfo_create_index_opt_attrs(index_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 
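/*
 * Build the sysfs tree for one CPU: a top-level cache directory with
 * one "index%d" subdirectory per cache in the CPU's local chain
 * (typically visible as /sys/devices/system/cpu/cpuN/cache/indexM;
 * the exact parent is whatever cacheinfo_create_cache_dir() set up).
 */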
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) static void cacheinfo_sysfs_populate(unsigned int cpu_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 				     struct cache *cache_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 	struct cache_dir *cache_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 	struct cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	int index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 	cache_dir = cacheinfo_create_cache_dir(cpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 	if (!cache_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 	cache = cache_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 	while (cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 		cacheinfo_create_index_dir(cache, index, cache_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 		index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 		cache = cache->next_local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 
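/*
 * CPU online hook: instantiate (or extend) the CPU's cache chain from
 * the device tree and expose it through sysfs.  Nothing is created if
 * the cache chain could not be instantiated.
 */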
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) void cacheinfo_cpu_online(unsigned int cpu_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 	struct cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 	cache = cache_chain_instantiate(cpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 	if (!cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 	cacheinfo_sysfs_populate(cpu_id, cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) /* Functions needed to remove cache entries on CPU offline or suspend/resume. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) #if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)     defined(CONFIG_HOTPLUG_CPU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 
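/*
 * Map a CPU id back to the head of its cache chain via the CPU's
 * device tree node.  Returns NULL (warning once) when the CPU has no
 * OF node, or when no cache object exists for that node.
 */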
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 	struct device_node *cpu_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 	struct cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 	cpu_node = of_get_cpu_node(cpu_id, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 	if (!cpu_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 	cache = cache_lookup_by_node(cpu_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 	of_node_put(cpu_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 	return cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 
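/*
 * Unwind the index list built by cacheinfo_create_index_dir(),
 * dropping the reference the cache directory holds on each index
 * kobject.
 */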
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) static void remove_index_dirs(struct cache_dir *cache_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 	struct cache_index_dir *index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 	index = cache_dir->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 	while (index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 		struct cache_index_dir *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 		next = index->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 		kobject_put(&index->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 		index = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 
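/*
 * Tear down one CPU's cache directory: remove the index
 * subdirectories, unlink and release the bare "cache" kobject, then
 * free the cache_dir bookkeeping structure itself.
 */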
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) static void remove_cache_dir(struct cache_dir *cache_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) 	remove_index_dirs(cache_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) 	/* Remove cache dir from sysfs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) 	kobject_del(cache_dir->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) 	kobject_put(cache_dir->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) 	kfree(cache_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) 
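/*
 * Clear @cpu from the shared_cpu_map of every cache in its local
 * chain, releasing any cache object left with no remaining users.
 */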
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) static void cache_cpu_clear(struct cache *cache, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) 	while (cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) 		struct cache *next = cache->next_local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) 		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) 			  "CPU %i not accounted in %pOFP(%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) 			  cpu, cache->ofnode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) 			  cache_type_string(cache));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) 		/* Release the cache object if all the cpus using it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 		 * are offline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) 		if (cpumask_empty(&cache->shared_cpu_map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) 			release_cache(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) 		cache = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) 
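/*
 * CPU offline hook, the counterpart of cacheinfo_cpu_online(): remove
 * the sysfs directory first, then detach the CPU from its cache chain.
 */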
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) void cacheinfo_cpu_offline(unsigned int cpu_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) 	struct cache_dir *cache_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) 	struct cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) 	/* Prevent userspace from seeing inconsistent state - remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) 	 * the sysfs hierarchy first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) 	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) 	/* careful, sysfs population may have failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) 	if (cache_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) 		remove_cache_dir(cache_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) 	per_cpu(cache_dir_pcpu, cpu_id) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) 	/* clear the CPU's bit in its cache chain, possibly freeing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) 	 * cache objects */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) 	cache = cache_lookup_by_cpu(cpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) 	if (cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) 		cache_cpu_clear(cache, cpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) 
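/*
 * Remove cacheinfo for every online CPU, e.g. on the pseries suspend
 * path covered by the #if guard above.  The caller must hold the CPU
 * hotplug lock, as asserted by lockdep_assert_cpus_held().
 */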
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) void cacheinfo_teardown(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) 	unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) 	lockdep_assert_cpus_held();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) 	for_each_online_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) 		cacheinfo_cpu_offline(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) 
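/*
 * Recreate cacheinfo for every online CPU after cacheinfo_teardown(),
 * re-reading the device tree.  Also requires the CPU hotplug lock.
 */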
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) void cacheinfo_rebuild(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) 	unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) 	lockdep_assert_cpus_held();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) 	for_each_online_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) 		cacheinfo_cpu_online(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) #endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */