// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
 */
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/local64.h>
#include <asm/sysreg.h>
#include <soc/qcom/kryo-l2-accessors.h>

#define MAX_L2_CTRS             9

#define L2PMCR_NUM_EV_SHIFT     11
#define L2PMCR_NUM_EV_MASK      0x1F

#define L2PMCR                  0x400
#define L2PMCNTENCLR            0x403
#define L2PMCNTENSET            0x404
#define L2PMINTENCLR            0x405
#define L2PMINTENSET            0x406
#define L2PMOVSCLR              0x407
#define L2PMOVSSET              0x408
#define L2PMCCNTCR              0x409
#define L2PMCCNTR               0x40A
#define L2PMCCNTSR              0x40C
#define L2PMRESR                0x410
#define IA_L2PMXEVCNTCR_BASE    0x420
#define IA_L2PMXEVCNTR_BASE     0x421
#define IA_L2PMXEVFILTER_BASE   0x423
#define IA_L2PMXEVTYPER_BASE    0x424

#define IA_L2_REG_OFFSET        0x10

#define L2PMXEVFILTER_SUFILTER_ALL      0x000E0000
#define L2PMXEVFILTER_ORGFILTER_IDINDEP 0x00000004
#define L2PMXEVFILTER_ORGFILTER_ALL     0x00000003

#define L2EVTYPER_REG_SHIFT     3

#define L2PMRESR_GROUP_BITS     8
#define L2PMRESR_GROUP_MASK     GENMASK(7, 0)

#define L2CYCLE_CTR_BIT         31
#define L2CYCLE_CTR_RAW_CODE    0xFE

#define L2PMCR_RESET_ALL        0x6
#define L2PMCR_COUNTERS_ENABLE  0x1
#define L2PMCR_COUNTERS_DISABLE 0x0

#define L2PMRESR_EN             BIT_ULL(63)

#define L2_EVT_MASK             0x00000FFF
#define L2_EVT_CODE_MASK        0x00000FF0
#define L2_EVT_GRP_MASK         0x0000000F
#define L2_EVT_CODE_SHIFT       4
#define L2_EVT_GRP_SHIFT        0

#define L2_EVT_CODE(event)  (((event) & L2_EVT_CODE_MASK) >> L2_EVT_CODE_SHIFT)
#define L2_EVT_GROUP(event) (((event) & L2_EVT_GRP_MASK) >> L2_EVT_GRP_SHIFT)

#define L2_EVT_GROUP_MAX        7

#define L2_COUNTER_RELOAD       BIT_ULL(31)
#define L2_CYCLE_COUNTER_RELOAD BIT_ULL(63)

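/*
 * Per-counter registers are selected indirectly: counter i's register is
 * the corresponding _BASE selector plus i * IA_L2_REG_OFFSET, e.g.
 * reg_idx(IA_L2PMXEVCNTR, 2) selects 0x421 + 2 * 0x10 = 0x441.
 */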
#define reg_idx(reg, i)         (((i) * IA_L2_REG_OFFSET) + reg##_BASE)

/*
 * Events
 */
#define L2_EVENT_CYCLES                 0xfe
#define L2_EVENT_DCACHE_OPS             0x400
#define L2_EVENT_ICACHE_OPS             0x401
#define L2_EVENT_TLBI                   0x402
#define L2_EVENT_BARRIERS               0x403
#define L2_EVENT_TOTAL_READS            0x405
#define L2_EVENT_TOTAL_WRITES           0x406
#define L2_EVENT_TOTAL_REQUESTS         0x407
#define L2_EVENT_LDREX                  0x420
#define L2_EVENT_STREX                  0x421
#define L2_EVENT_CLREX                  0x422

struct cluster_pmu;

/*
 * Aggregate PMU. Implements the core pmu functions and manages
 * the hardware PMUs.
 */
struct l2cache_pmu {
        struct hlist_node node;
        u32 num_pmus;
        struct pmu pmu;
        int num_counters;
        cpumask_t cpumask;
        struct platform_device *pdev;
        struct cluster_pmu * __percpu *pmu_cluster;
        struct list_head clusters;
};

/*
 * The cache is made up of one or more clusters, each with its own PMU.
 * Each cluster is associated with one or more CPUs.
 * This structure represents one of the hardware PMUs.
 *
 * Events can be envisioned as a 2-dimensional array. Each column represents
 * a group of events. There are 8 groups. Only one entry from each
 * group can be in use at a time.
 *
 * Events are specified as 0xCCG, where CC is 2 hex digits specifying
 * the code (array row) and G specifies the group (column).
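 * For example, the raw event 0x405 (total reads) selects code 0x40 in
 * group 5.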
 *
 * In addition there is a cycle counter event specified by L2CYCLE_CTR_RAW_CODE
 * which is outside the above scheme.
 */
struct cluster_pmu {
        struct list_head next;
        struct perf_event *events[MAX_L2_CTRS];
        struct l2cache_pmu *l2cache_pmu;
        DECLARE_BITMAP(used_counters, MAX_L2_CTRS);
        DECLARE_BITMAP(used_groups, L2_EVT_GROUP_MAX + 1);
        int irq;
        int cluster_id;
        /* The CPU that is used for collecting events on this cluster */
        int on_cpu;
        /* All the CPUs associated with this cluster */
        cpumask_t cluster_cpus;
        spinlock_t pmu_lock;
};

#define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))

static u32 l2_cycle_ctr_idx;
static u32 l2_counter_present_mask;

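/* Map a counter index to its bit in the enable/interrupt/overflow registers */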
static inline u32 idx_to_reg_bit(u32 idx)
{
        if (idx == l2_cycle_ctr_idx)
                return BIT(L2CYCLE_CTR_BIT);

        return BIT(idx);
}

static inline struct cluster_pmu *get_cluster_pmu(
        struct l2cache_pmu *l2cache_pmu, int cpu)
{
        return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu);
}

static void cluster_pmu_reset(void)
{
        /* Reset all counters */
        kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_RESET_ALL);
        kryo_l2_set_indirect_reg(L2PMCNTENCLR, l2_counter_present_mask);
        kryo_l2_set_indirect_reg(L2PMINTENCLR, l2_counter_present_mask);
        kryo_l2_set_indirect_reg(L2PMOVSCLR, l2_counter_present_mask);
}

static inline void cluster_pmu_enable(void)
{
        kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_ENABLE);
}

static inline void cluster_pmu_disable(void)
{
        kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_DISABLE);
}

static inline void cluster_pmu_counter_set_value(u32 idx, u64 value)
{
        if (idx == l2_cycle_ctr_idx)
                kryo_l2_set_indirect_reg(L2PMCCNTR, value);
        else
                kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx), value);
}

static inline u64 cluster_pmu_counter_get_value(u32 idx)
{
        u64 value;

        if (idx == l2_cycle_ctr_idx)
                value = kryo_l2_get_indirect_reg(L2PMCCNTR);
        else
                value = kryo_l2_get_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx));

        return value;
}

static inline void cluster_pmu_counter_enable(u32 idx)
{
        kryo_l2_set_indirect_reg(L2PMCNTENSET, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_counter_disable(u32 idx)
{
        kryo_l2_set_indirect_reg(L2PMCNTENCLR, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_counter_enable_interrupt(u32 idx)
{
        kryo_l2_set_indirect_reg(L2PMINTENSET, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_counter_disable_interrupt(u32 idx)
{
        kryo_l2_set_indirect_reg(L2PMINTENCLR, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_set_evccntcr(u32 val)
{
        kryo_l2_set_indirect_reg(L2PMCCNTCR, val);
}

static inline void cluster_pmu_set_evcntcr(u32 ctr, u32 val)
{
        kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTCR, ctr), val);
}

static inline void cluster_pmu_set_evtyper(u32 ctr, u32 val)
{
        kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVTYPER, ctr), val);
}

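/*
 * L2PMRESR holds one 8-bit event-code field per group plus a global
 * enable bit (bit 63). Read-modify-write the register under pmu_lock so
 * concurrent updates for different groups on this cluster do not race.
 */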
static void cluster_pmu_set_resr(struct cluster_pmu *cluster,
                                 u32 event_group, u32 event_cc)
{
        u64 field;
        u64 resr_val;
        u32 shift;
        unsigned long flags;

        shift = L2PMRESR_GROUP_BITS * event_group;
        field = ((u64)(event_cc & L2PMRESR_GROUP_MASK) << shift);

        spin_lock_irqsave(&cluster->pmu_lock, flags);

        resr_val = kryo_l2_get_indirect_reg(L2PMRESR);
        resr_val &= ~(L2PMRESR_GROUP_MASK << shift);
        resr_val |= field;
        resr_val |= L2PMRESR_EN;
        kryo_l2_set_indirect_reg(L2PMRESR, resr_val);

        spin_unlock_irqrestore(&cluster->pmu_lock, flags);
}

/*
 * Hardware allows filtering of events based on the originating
 * CPU. Turn this off by setting filter bits to allow events from
 * all CPUs, subunits and ID-independent events in this cluster.
 */
static inline void cluster_pmu_set_evfilter_sys_mode(u32 ctr)
{
        u32 val = L2PMXEVFILTER_SUFILTER_ALL |
                  L2PMXEVFILTER_ORGFILTER_IDINDEP |
                  L2PMXEVFILTER_ORGFILTER_ALL;

        kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val);
}

static inline u32 cluster_pmu_getreset_ovsr(void)
{
        u32 result = kryo_l2_get_indirect_reg(L2PMOVSSET);

        kryo_l2_set_indirect_reg(L2PMOVSCLR, result);
        return result;
}

static inline bool cluster_pmu_has_overflowed(u32 ovsr)
{
        return !!(ovsr & l2_counter_present_mask);
}

static inline bool cluster_pmu_counter_has_overflowed(u32 ovsr, u32 idx)
{
        return !!(ovsr & idx_to_reg_bit(idx));
}

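/*
 * Fold the delta since the last read of the hardware counter into
 * event->count. The cmpxchg loop keeps the update of prev_count safe
 * against a concurrent update (e.g. from the overflow interrupt).
 */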
static void l2_cache_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 delta, prev, now;
        u32 idx = hwc->idx;

        do {
                prev = local64_read(&hwc->prev_count);
                now = cluster_pmu_counter_get_value(idx);
        } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

        /*
         * The cycle counter is 64-bit, but all other counters are
         * 32-bit, and we must handle 32-bit overflow explicitly.
         */
        delta = now - prev;
        if (idx != l2_cycle_ctr_idx)
                delta &= 0xffffffff;

        local64_add(delta, &event->count);
}

static void l2_cache_cluster_set_period(struct cluster_pmu *cluster,
                                        struct hw_perf_event *hwc)
{
        u32 idx = hwc->idx;
        u64 new;

        /*
         * We limit the max period to half the max counter value so
         * that even in the case of extreme interrupt latency the
         * counter will (hopefully) not wrap past its initial value.
         */
        if (idx == l2_cycle_ctr_idx)
                new = L2_CYCLE_COUNTER_RELOAD;
        else
                new = L2_COUNTER_RELOAD;

        local64_set(&hwc->prev_count, new);
        cluster_pmu_counter_set_value(idx, new);
}

static int l2_cache_get_event_idx(struct cluster_pmu *cluster,
                                  struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int num_ctrs = cluster->l2cache_pmu->num_counters - 1;
        unsigned int group;

        if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
                if (test_and_set_bit(l2_cycle_ctr_idx, cluster->used_counters))
                        return -EAGAIN;

                return l2_cycle_ctr_idx;
        }

        idx = find_first_zero_bit(cluster->used_counters, num_ctrs);
        if (idx == num_ctrs)
                /* The counters are all in use. */
                return -EAGAIN;

        /*
         * Check for column exclusion: event column already in use by another
         * event. This is for events which are not in the same group.
         * Conflicting events in the same group are detected in event_init.
         */
        group = L2_EVT_GROUP(hwc->config_base);
        if (test_bit(group, cluster->used_groups))
                return -EAGAIN;

        set_bit(idx, cluster->used_counters);
        set_bit(group, cluster->used_groups);

        return idx;
}

static void l2_cache_clear_event_idx(struct cluster_pmu *cluster,
                                     struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        clear_bit(idx, cluster->used_counters);
        if (hwc->config_base != L2CYCLE_CTR_RAW_CODE)
                clear_bit(L2_EVT_GROUP(hwc->config_base), cluster->used_groups);
}

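/*
 * Overflow interrupt handler: read and clear the overflow status, then
 * update and re-arm every in-use counter that has overflowed.
 */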
static irqreturn_t l2_cache_handle_irq(int irq_num, void *data)
{
        struct cluster_pmu *cluster = data;
        int num_counters = cluster->l2cache_pmu->num_counters;
        u32 ovsr;
        int idx;

        ovsr = cluster_pmu_getreset_ovsr();
        if (!cluster_pmu_has_overflowed(ovsr))
                return IRQ_NONE;

        for_each_set_bit(idx, cluster->used_counters, num_counters) {
                struct perf_event *event = cluster->events[idx];
                struct hw_perf_event *hwc;

                if (WARN_ON_ONCE(!event))
                        continue;

                if (!cluster_pmu_counter_has_overflowed(ovsr, idx))
                        continue;

                l2_cache_event_update(event);
                hwc = &event->hw;

                l2_cache_cluster_set_period(cluster, hwc);
        }

        return IRQ_HANDLED;
}

/*
 * Implementation of abstract pmu functionality required by
 * the core perf events code.
 */

static void l2_cache_pmu_enable(struct pmu *pmu)
{
        /*
         * Although there is only one PMU (per socket) controlling multiple
         * physical PMUs (per cluster), because we do not support per-task
         * mode, each event is associated with a CPU. Each event has
         * pmu_enable called on its CPU, so here it is only necessary to
         * enable the counters for the current CPU.
         */

        cluster_pmu_enable();
}

static void l2_cache_pmu_disable(struct pmu *pmu)
{
        cluster_pmu_disable();
}

static int l2_cache_event_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct cluster_pmu *cluster;
        struct perf_event *sibling;
        struct l2cache_pmu *l2cache_pmu;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        l2cache_pmu = to_l2cache_pmu(event->pmu);

        if (hwc->sample_period) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Sampling not supported\n");
                return -EOPNOTSUPP;
        }

        if (event->cpu < 0) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Per-task mode not supported\n");
                return -EOPNOTSUPP;
        }

        if (((L2_EVT_GROUP(event->attr.config) > L2_EVT_GROUP_MAX) ||
             ((event->attr.config & ~L2_EVT_MASK) != 0)) &&
            (event->attr.config != L2CYCLE_CTR_RAW_CODE)) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Invalid config %llx\n",
                                    event->attr.config);
                return -EINVAL;
        }

        /* Don't allow groups with mixed PMUs, except for s/w events */
        if (event->group_leader->pmu != event->pmu &&
            !is_software_event(event->group_leader)) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Can't create mixed PMU group\n");
                return -EINVAL;
        }

        for_each_sibling_event(sibling, event->group_leader) {
                if (sibling->pmu != event->pmu &&
                    !is_software_event(sibling)) {
                        dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                            "Can't create mixed PMU group\n");
                        return -EINVAL;
                }
        }

        cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
        if (!cluster) {
                /* CPU has not been initialised */
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                        "CPU%d not associated with L2 cluster\n", event->cpu);
                return -EINVAL;
        }

        /* Ensure all events in a group are on the same cpu */
        if ((event->group_leader != event) &&
            (cluster->on_cpu != event->group_leader->cpu)) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Can't create group on CPUs %d and %d",
                                    event->cpu, event->group_leader->cpu);
                return -EINVAL;
        }

        if ((event != event->group_leader) &&
            !is_software_event(event->group_leader) &&
            (L2_EVT_GROUP(event->group_leader->attr.config) ==
             L2_EVT_GROUP(event->attr.config))) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Column exclusion: conflicting events %llx %llx\n",
                                    event->group_leader->attr.config,
                                    event->attr.config);
                return -EINVAL;
        }

        for_each_sibling_event(sibling, event->group_leader) {
                if ((sibling != event) &&
                    !is_software_event(sibling) &&
                    (L2_EVT_GROUP(sibling->attr.config) ==
                     L2_EVT_GROUP(event->attr.config))) {
                        dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                            "Column exclusion: conflicting events %llx %llx\n",
                                            sibling->attr.config,
                                            event->attr.config);
                        return -EINVAL;
                }
        }

        hwc->idx = -1;
        hwc->config_base = event->attr.config;

        /*
         * Ensure all events are on the same cpu so all events are in the
         * same cpu context, to avoid races on pmu_enable etc.
         */
        event->cpu = cluster->on_cpu;

        return 0;
}

static void l2_cache_event_start(struct perf_event *event, int flags)
{
        struct cluster_pmu *cluster;
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        u32 config;
        u32 event_cc, event_group;

        hwc->state = 0;

        cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

        l2_cache_cluster_set_period(cluster, hwc);

        if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
                cluster_pmu_set_evccntcr(0);
        } else {
                config = hwc->config_base;
                event_cc = L2_EVT_CODE(config);
                event_group = L2_EVT_GROUP(config);

                cluster_pmu_set_evcntcr(idx, 0);
                cluster_pmu_set_evtyper(idx, event_group);
                cluster_pmu_set_resr(cluster, event_group, event_cc);
                cluster_pmu_set_evfilter_sys_mode(idx);
        }

        cluster_pmu_counter_enable_interrupt(idx);
        cluster_pmu_counter_enable(idx);
}

static void l2_cache_event_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (hwc->state & PERF_HES_STOPPED)
                return;

        cluster_pmu_counter_disable_interrupt(idx);
        cluster_pmu_counter_disable(idx);

        if (flags & PERF_EF_UPDATE)
                l2_cache_event_update(event);
        hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int l2_cache_event_add(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;
        struct cluster_pmu *cluster;

        cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

        idx = l2_cache_get_event_idx(cluster, event);
        if (idx < 0)
                return idx;

        hwc->idx = idx;
        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        cluster->events[idx] = event;
        local64_set(&hwc->prev_count, 0);

        if (flags & PERF_EF_START)
                l2_cache_event_start(event, flags);

        /* Propagate changes to the userspace mapping. */
        perf_event_update_userpage(event);

        return err;
}

static void l2_cache_event_del(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct cluster_pmu *cluster;
        int idx = hwc->idx;

        cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

        l2_cache_event_stop(event, flags | PERF_EF_UPDATE);
        cluster->events[idx] = NULL;
        l2_cache_clear_event_idx(cluster, event);

        perf_event_update_userpage(event);
}

static void l2_cache_event_read(struct perf_event *event)
{
        l2_cache_event_update(event);
}

static ssize_t l2_cache_pmu_cpumask_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev));

        return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask);
}

static struct device_attribute l2_cache_pmu_cpumask_attr =
                __ATTR(cpumask, S_IRUGO, l2_cache_pmu_cpumask_show, NULL);

static struct attribute *l2_cache_pmu_cpumask_attrs[] = {
        &l2_cache_pmu_cpumask_attr.attr,
        NULL,
};

static struct attribute_group l2_cache_pmu_cpumask_group = {
        .attrs = l2_cache_pmu_cpumask_attrs,
};

/* CCG format for perf RAW codes. */
PMU_FORMAT_ATTR(l2_code,  "config:4-11");
PMU_FORMAT_ATTR(l2_group, "config:0-3");
PMU_FORMAT_ATTR(event,    "config:0-11");
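/*
 * A raw event can be requested either via the split l2_code/l2_group
 * fields or via the combined event field; for instance, event=0x405 is
 * equivalent to l2_code=0x40,l2_group=0x5 (illustrative perf tool
 * syntax; the PMU instance name is assigned when the PMU is registered).
 */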
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) static struct attribute *l2_cache_pmu_formats[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) &format_attr_l2_code.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) &format_attr_l2_group.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) &format_attr_event.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) static struct attribute_group l2_cache_pmu_format_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) .name = "format",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) .attrs = l2_cache_pmu_formats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) static ssize_t l2cache_pmu_event_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) struct device_attribute *attr, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) struct perf_pmu_events_attr *pmu_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) #define L2CACHE_EVENT_ATTR(_name, _id) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) (&((struct perf_pmu_events_attr[]) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) { .attr = __ATTR(_name, 0444, l2cache_pmu_event_show, NULL), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) .id = _id, } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) })[0].attr.attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) static struct attribute *l2_cache_pmu_events[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) L2CACHE_EVENT_ATTR(cycles, L2_EVENT_CYCLES),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) L2CACHE_EVENT_ATTR(dcache-ops, L2_EVENT_DCACHE_OPS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) L2CACHE_EVENT_ATTR(icache-ops, L2_EVENT_ICACHE_OPS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) L2CACHE_EVENT_ATTR(tlbi, L2_EVENT_TLBI),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) L2CACHE_EVENT_ATTR(barriers, L2_EVENT_BARRIERS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) L2CACHE_EVENT_ATTR(total-reads, L2_EVENT_TOTAL_READS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) L2CACHE_EVENT_ATTR(total-writes, L2_EVENT_TOTAL_WRITES),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) L2CACHE_EVENT_ATTR(total-requests, L2_EVENT_TOTAL_REQUESTS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) L2CACHE_EVENT_ATTR(ldrex, L2_EVENT_LDREX),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) L2CACHE_EVENT_ATTR(strex, L2_EVENT_STREX),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) L2CACHE_EVENT_ATTR(clrex, L2_EVENT_CLREX),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) static struct attribute_group l2_cache_pmu_events_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) .name = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) .attrs = l2_cache_pmu_events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) static const struct attribute_group *l2_cache_pmu_attr_grps[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) &l2_cache_pmu_format_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) &l2_cache_pmu_cpumask_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) &l2_cache_pmu_events_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) * Generic device handlers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) static const struct acpi_device_id l2_cache_pmu_acpi_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) { "QCOM8130", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) static int get_num_counters(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) val = kryo_l2_get_indirect_reg(L2PMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * Read number of counters from L2PMCR and add 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * for the cycle counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) return ((val >> L2PMCR_NUM_EV_SHIFT) & L2PMCR_NUM_EV_MASK) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) struct l2cache_pmu *l2cache_pmu, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) u64 mpidr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) int cpu_cluster_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) struct cluster_pmu *cluster = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * This assumes that the cluster_id is in MPIDR[aff1] for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * single-threaded cores, and MPIDR[aff2] for multi-threaded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) * cores. This logic will have to be updated if this changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) mpidr = read_cpuid_mpidr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) if (mpidr & MPIDR_MT_BITMASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) if (cluster->cluster_id != cpu_cluster_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) dev_info(&l2cache_pmu->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) "CPU%d associated with cluster %d\n", cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) cluster->cluster_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) cpumask_set_cpu(cpu, &cluster->cluster_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) return cluster;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) struct cluster_pmu *cluster;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) struct l2cache_pmu *l2cache_pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) cluster = get_cluster_pmu(l2cache_pmu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (!cluster) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) /* First time this CPU has come online */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (!cluster) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) /* Only if broken firmware doesn't list every cluster */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) WARN_ONCE(1, "No L2 cache cluster for CPU%d\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) /* If another CPU is managing this cluster, we're done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (cluster->on_cpu != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * All CPUs on this cluster were down, use this one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * Reset to put it into sane state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) cluster->on_cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) cluster_pmu_reset();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) enable_irq(cluster->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
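/*
 * CPU hotplug "offline" callback: if the departing CPU manages its
 * cluster, hand ownership to another online CPU in the same cluster,
 * migrating the perf context and the IRQ affinity along with it; if no
 * such CPU remains, leave the cluster unmanaged and disable its IRQ.
 */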
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) struct cluster_pmu *cluster;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) struct l2cache_pmu *l2cache_pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) cpumask_t cluster_online_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) unsigned int target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) cluster = get_cluster_pmu(l2cache_pmu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (!cluster)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) /* If this CPU is not managing the cluster, we're done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (cluster->on_cpu != cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /* Give up ownership of cluster */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) cluster->on_cpu = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 	/* Find another CPU in this cluster that is still online, if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) target = cpumask_any_but(&cluster_online_cpus, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (target >= nr_cpu_ids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) disable_irq(cluster->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) cluster->on_cpu = target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) cpumask_set_cpu(target, &l2cache_pmu->cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(target)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
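/*
 * Called once per child device of the PMU platform device. The ACPI _UID
 * of the child identifies the firmware cluster id; allocate a cluster_pmu
 * for it, request its per-cluster counter interrupt (left disabled until
 * a CPU claims the cluster) and add it to the PMU's cluster list.
 */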
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) struct platform_device *pdev = to_platform_device(dev->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) struct platform_device *sdev = to_platform_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) struct l2cache_pmu *l2cache_pmu = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) struct cluster_pmu *cluster;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) struct acpi_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) unsigned long fw_cluster_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (acpi_bus_get_device(ACPI_HANDLE(dev), &device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (kstrtoul(device->pnp.unique_id, 10, &fw_cluster_id) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 		dev_err(&pdev->dev, "Unable to read ACPI _UID\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (!cluster)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) INIT_LIST_HEAD(&cluster->next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) list_add(&cluster->next, &l2cache_pmu->clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) cluster->cluster_id = fw_cluster_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) irq = platform_get_irq(sdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return irq;
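	/* Keep the IRQ disabled until a CPU claims this cluster on hotplug online */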
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) irq_set_status_flags(irq, IRQ_NOAUTOEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) cluster->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) cluster->l2cache_pmu = l2cache_pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) cluster->on_cpu = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) err = devm_request_irq(&pdev->dev, irq, l2_cache_handle_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) IRQF_NOBALANCING | IRQF_NO_THREAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) "l2-cache-pmu", cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) "Unable to request IRQ%d for L2 PMU counters\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) dev_info(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) 		 "Registered L2 cache PMU cluster %lu\n", fw_cluster_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) spin_lock_init(&cluster->pmu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) l2cache_pmu->num_pmus++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
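/*
 * Platform driver probe: allocate and initialize the l2cache_pmu, discover
 * the clusters described by the firmware child devices, register with the
 * CPU hotplug framework and finally register the PMU with the perf core.
 */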
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) static int l2_cache_pmu_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct l2cache_pmu *l2cache_pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) l2cache_pmu =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (!l2cache_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) INIT_LIST_HEAD(&l2cache_pmu->clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) platform_set_drvdata(pdev, l2cache_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) l2cache_pmu->pmu = (struct pmu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) 		/* The suffix is the instance id, for future use with multiple sockets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) .name = "l2cache_0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) .task_ctx_nr = perf_invalid_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) .pmu_enable = l2_cache_pmu_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) .pmu_disable = l2_cache_pmu_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) .event_init = l2_cache_event_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) .add = l2_cache_event_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) .del = l2_cache_event_del,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) .start = l2_cache_event_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) .stop = l2_cache_event_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) .read = l2_cache_event_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) .attr_groups = l2_cache_pmu_attr_grps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) l2cache_pmu->num_counters = get_num_counters();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) l2cache_pmu->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct cluster_pmu *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (!l2cache_pmu->pmu_cluster)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
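	/*
	 * The highest-numbered implemented counter is the dedicated cycle
	 * counter; the remaining counters are general-purpose event counters.
	 */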
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) l2_cycle_ctr_idx = l2cache_pmu->num_counters - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) l2_counter_present_mask = GENMASK(l2cache_pmu->num_counters - 2, 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) BIT(L2CYCLE_CTR_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) cpumask_clear(&l2cache_pmu->cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /* Read cluster info and initialize each cluster */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) err = device_for_each_child(&pdev->dev, l2cache_pmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) l2_cache_pmu_probe_cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (l2cache_pmu->num_pmus == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) dev_err(&pdev->dev, "No hardware L2 cache PMUs found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) &l2cache_pmu->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) 		dev_err(&pdev->dev, "Error %d registering hotplug\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) dev_err(&pdev->dev, "Error %d registering L2 cache PMU\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) goto out_unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) dev_info(&pdev->dev, "Registered L2 cache PMU using %d HW PMUs\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) l2cache_pmu->num_pmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) out_unregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) &l2cache_pmu->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
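/*
 * Platform driver remove: unregister from the perf core first so no new
 * events can be created, then drop the CPU hotplug instance.
 */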
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) static int l2_cache_pmu_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) struct l2cache_pmu *l2cache_pmu =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) to_l2cache_pmu(platform_get_drvdata(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) perf_pmu_unregister(&l2cache_pmu->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) &l2cache_pmu->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) static struct platform_driver l2_cache_pmu_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) .name = "qcom-l2cache-pmu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) .acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) .suppress_bind_attrs = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) .probe = l2_cache_pmu_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) .remove = l2_cache_pmu_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
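/*
 * Set up the dynamic CPU hotplug state shared by all PMU instances before
 * registering the platform driver, so the online/offline callbacks are in
 * place by the time probe adds an instance.
 */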
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) static int __init register_l2_cache_pmu_driver(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) err = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) "AP_PERF_ARM_QCOM_L2_ONLINE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) l2cache_pmu_online_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) l2cache_pmu_offline_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) return platform_driver_register(&l2_cache_pmu_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) device_initcall(register_l2_cache_pmu_driver);