/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/percpu.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	u64 mpidr;

	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* Uniprocessor systems can rely on default topology values */
	if (mpidr & MPIDR_UP_BITMASK)
		return;

	/*
	 * This would be the place to create cpu topology based on MPIDR.
	 *
	 * However, it cannot be trusted to depict the actual topology; some
	 * pieces of the architecture enforce an artificial cap on Aff0 values
	 * (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an
	 * artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up
	 * having absolutely no relationship to the actual underlying system
	 * topology, and cannot be reasonably used as core / package ID.
	 *
	 * If the MT bit is set, Aff0 *could* be used to define a thread ID, but
	 * we still wouldn't be able to obtain a sane core ID. This means we
	 * need to entirely ignore MPIDR for any topology deduction.
	 */
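	/*
	 * For illustration only, an MPIDR-based decomposition would look
	 * roughly like the sketch below (deliberately *not* done here, for
	 * the reasons above):
	 *
	 *	thread  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	 *	core    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	 *	package = MPIDR_AFFINITY_LEVEL(mpidr, 2);
	 *
	 * Instead, synthesize IDs that do not depend on MPIDR at all:
	 */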
	cpuid_topo->thread_id = -1;
	cpuid_topo->core_id = cpuid;
	cpuid_topo->package_id = cpu_to_node(cpuid);

	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
}

#ifdef CONFIG_ACPI
static bool __init acpi_cpu_is_threaded(int cpu)
{
	int is_threaded = acpi_pptt_cpu_is_thread(cpu);

	/*
	 * if the PPTT doesn't have thread information, assume a homogeneous
	 * machine and return the current CPU's thread state.
	 */
	if (is_threaded < 0)
		is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

	return !!is_threaded;
}

/*
 * Propagate the topology information of the processor_topology_node tree to the
 * cpu_topology array.
 */
int __init parse_acpi_topology(void)
{
	int cpu, topology_id;

	if (acpi_disabled)
		return 0;

	for_each_possible_cpu(cpu) {
		int i, cache_id;

		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0)
			return topology_id;

		if (acpi_cpu_is_threaded(cpu)) {
			cpu_topology[cpu].thread_id = topology_id;
			topology_id = find_acpi_cpu_topology(cpu, 1);
			cpu_topology[cpu].core_id = topology_id;
		} else {
			cpu_topology[cpu].thread_id = -1;
			cpu_topology[cpu].core_id = topology_id;
		}
		topology_id = find_acpi_cpu_topology_package(cpu);
		cpu_topology[cpu].package_id = topology_id;

		i = acpi_find_last_cache_level(cpu);

		if (i > 0) {
			/*
			 * this is the only part of cpu_topology that has
			 * a direct relationship with the cache topology
			 */
			cache_id = find_acpi_cpu_cache_topology(cpu, i);
			if (cache_id > 0)
				cpu_topology[cpu].llc_id = cache_id;
		}
	}

	return 0;
}
#endif

#ifdef CONFIG_ARM64_AMU_EXTN

#undef pr_fmt
#define pr_fmt(fmt) "AMU: " fmt

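/*
 * arch_max_freq_scale caches, per CPU, the precomputed ratio between the
 * constant counter rate and that CPU's maximum frequency (see
 * validate_cpu_freq_invariance_counters()); arch_const_cycles_prev and
 * arch_core_cycles_prev hold the counter values sampled at the previous
 * tick. amu_fie_cpus is the set of CPUs whose AMU counters are used for
 * frequency invariance (FIE).
 */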
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale);
static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
static cpumask_var_t amu_fie_cpus;

/* Initialize counter reference per-cpu variables for the current CPU */
void init_cpu_freq_invariance_counters(void)
{
	this_cpu_write(arch_core_cycles_prev,
		       read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0));
	this_cpu_write(arch_const_cycles_prev,
		       read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0));
}

static int validate_cpu_freq_invariance_counters(int cpu)
{
	u64 max_freq_hz, ratio;

	if (!cpu_has_amu_feat(cpu)) {
		pr_debug("CPU%d: counters are not supported.\n", cpu);
		return -EINVAL;
	}

	if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
		     !per_cpu(arch_core_cycles_prev, cpu))) {
		pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
		return -EINVAL;
	}

	/* Convert maximum frequency from kHz to Hz and validate */
	max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000;
	if (unlikely(!max_freq_hz)) {
		pr_debug("CPU%d: invalid maximum frequency.\n", cpu);
		return -EINVAL;
	}

	/*
	 * Pre-compute the fixed ratio between the frequency of the constant
	 * counter and the maximum frequency of the CPU.
	 *
	 *                           const_freq
	 * arch_max_freq_scale = ---------------- * SCHED_CAPACITY_SCALE²
	 *                        cpuinfo_max_freq
	 *
	 * We use a factor of 2 * SCHED_CAPACITY_SHIFT -> SCHED_CAPACITY_SCALE²
	 * in order to ensure a good resolution for arch_max_freq_scale for
	 * very low arch timer frequencies (down to the kHz range which should
	 * be unlikely).
	 */
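	/*
	 * Illustrative example (made-up numbers, not from any particular
	 * platform): with a 25 MHz constant counter and a cpuinfo_max_freq
	 * of 2.5 GHz,
	 *
	 *	ratio = (25000000 << 20) / 2500000000 = 10485
	 *
	 * which is roughly 0.01 * SCHED_CAPACITY_SCALE², as expected.
	 */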
	ratio = (u64)arch_timer_get_rate() << (2 * SCHED_CAPACITY_SHIFT);
	ratio = div64_u64(ratio, max_freq_hz);
	if (!ratio) {
		WARN_ONCE(1, "System timer frequency too low.\n");
		return -EINVAL;
	}

	per_cpu(arch_max_freq_scale, cpu) = (unsigned long)ratio;

	return 0;
}

static inline bool
enable_policy_freq_counters(int cpu, cpumask_var_t valid_cpus)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy) {
		pr_debug("CPU%d: No cpufreq policy found.\n", cpu);
		return false;
	}

	if (cpumask_subset(policy->related_cpus, valid_cpus))
		cpumask_or(amu_fie_cpus, policy->related_cpus,
			   amu_fie_cpus);

	cpufreq_cpu_put(policy);

	return true;
}

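/*
 * amu_fie_key is enabled by init_amu_fie() once at least one CPU (or, where
 * cpufreq policies exist, one complete policy) is known to have usable AMU
 * counters, and is disabled again if the system does not turn out to be
 * fully frequency invariant.
 */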
static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key)

static int __init init_amu_fie(void)
{
	cpumask_var_t valid_cpus;
	bool have_policy = false;
	int ret = 0;
	int cpu;

	if (!zalloc_cpumask_var(&valid_cpus, GFP_KERNEL))
		return -ENOMEM;

	if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto free_valid_mask;
	}

	for_each_present_cpu(cpu) {
		if (validate_cpu_freq_invariance_counters(cpu))
			continue;
		cpumask_set_cpu(cpu, valid_cpus);
		have_policy |= enable_policy_freq_counters(cpu, valid_cpus);
	}

	/*
	 * If we are not restricted by cpufreq policies, we only enable
	 * the use of the AMU feature for FIE if all CPUs support AMU.
	 * Otherwise, enable_policy_freq_counters has already enabled
	 * policy cpus.
	 */
	if (!have_policy && cpumask_equal(valid_cpus, cpu_present_mask))
		cpumask_or(amu_fie_cpus, amu_fie_cpus, valid_cpus);

	if (!cpumask_empty(amu_fie_cpus)) {
		pr_info("CPUs[%*pbl]: counters will be used for FIE.\n",
			cpumask_pr_args(amu_fie_cpus));
		static_branch_enable(&amu_fie_key);
	}

	/*
	 * If the system is not fully invariant after AMU init, disable
	 * partial use of counters for frequency invariance.
	 */
	if (!topology_scale_freq_invariant())
		static_branch_disable(&amu_fie_key);

free_valid_mask:
	free_cpumask_var(valid_cpus);

	return ret;
}
late_initcall_sync(init_amu_fie);

bool arch_freq_counters_available(const struct cpumask *cpus)
{
	return amu_freq_invariant() &&
	       cpumask_subset(cpus, amu_fie_cpus);
}

void topology_scale_freq_tick(void)
{
	u64 prev_core_cnt, prev_const_cnt;
	u64 core_cnt, const_cnt, scale;
	int cpu = smp_processor_id();

	if (!amu_freq_invariant())
		return;

	if (!cpumask_test_cpu(cpu, amu_fie_cpus))
		return;

	const_cnt = read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0);
	core_cnt = read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0);
	prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
	prev_core_cnt = this_cpu_read(arch_core_cycles_prev);

	if (unlikely(core_cnt <= prev_core_cnt ||
		     const_cnt <= prev_const_cnt))
		goto store_and_exit;

	/*
	 *            /\core    arch_max_freq_scale
	 * scale =   ------- * --------------------
	 *            /\const   SCHED_CAPACITY_SCALE
	 *
	 * See validate_cpu_freq_invariance_counters() for details on
	 * arch_max_freq_scale and the use of SCHED_CAPACITY_SHIFT.
	 */
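	/*
	 * Illustrative example (made-up numbers, continuing the 25 MHz /
	 * 2.5 GHz case sketched in validate_cpu_freq_invariance_counters(),
	 * where arch_max_freq_scale = 10485): a 4ms tick spent entirely at
	 * maximum frequency gives /\core = 10000000 and /\const = 100000, so
	 *
	 *	scale = ((10000000 * 10485) >> 10) / 100000 = 1023
	 *
	 * i.e. approximately SCHED_CAPACITY_SCALE, the small shortfall being
	 * the rounding of the precomputed ratio.
	 */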
	scale = core_cnt - prev_core_cnt;
	scale *= this_cpu_read(arch_max_freq_scale);
	scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,
			  const_cnt - prev_const_cnt);

	scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
	this_cpu_write(freq_scale, (unsigned long)scale);

store_and_exit:
	this_cpu_write(arch_core_cycles_prev, core_cnt);
	this_cpu_write(arch_const_cycles_prev, const_cnt);
}
#endif /* CONFIG_ARM64_AMU_EXTN */