// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARMv8 PMUv3 Performance Events handling code.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 */

#include <asm/irq_regs.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

#include <linux/acpi.h>
#include <linux/clocksource.h>
#include <linux/kvm_host.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/smp.h>

/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL				0xC2

/* ARMv8 Cavium ThunderX specific event types. */
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST			0xE9
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS		0xEA
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS		0xEB
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS		0xEC
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS		0xED

/*
 * ARMv8 architecturally-defined events; not all of these may be
 * supported on any given implementation. Unsupported events will
 * be disabled at run-time based on the PMCEID registers.
 */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INST_RETIRED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
};
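
/*
 * For illustration: a generic request such as "perf stat -e cycles" reaches
 * this PMU as PERF_COUNT_HW_CPU_CYCLES, which the map above resolves to the
 * architectural event number ARMV8_PMUV3_PERFCTR_CPU_CYCLES (0x11 in the
 * ARMv8 PMU event encoding).
 */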

static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_TLB,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB,

	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD,
	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_LL_CACHE_RD,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_BR_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};

static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
};

static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						   [PERF_COUNT_HW_CACHE_OP_MAX]
						   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,

	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
};

static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static ssize_t
armv8pmu_events_sysfs_show(struct device *dev,
			   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sysfs_emit(page, "event=0x%04llx\n", pmu_attr->id);
}

#define ARMV8_EVENT_ATTR(name, config)						\
	(&((struct perf_pmu_events_attr) {					\
		.attr = __ATTR(name, 0444, armv8pmu_events_sysfs_show, NULL),	\
		.id = config,							\
	}).attr.attr)
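
/*
 * For illustration: ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR)
 * yields a read-only sysfs attribute named "sw_incr" whose show routine
 * prints "event=0x0000", i.e. the event number userspace should place in
 * perf_event_attr::config.
 */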

static struct attribute *armv8_pmuv3_event_attrs[] = {
	ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
	ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL),
	ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE),
	ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL),
	ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED),
	ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED),
	ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED),
	ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN),
	ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN),
	ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED),
	ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED),
	ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED),
	ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED),
	ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES),
	ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED),
	ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS),
	ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE),
	ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB),
	ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE),
	ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB),
	ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS),
	ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR),
	ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC),
	ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES),
	/* Don't expose the chain event in /sys, since it's useless in isolation */
	ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED),
	ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED),
	ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND),
	ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND),
	ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB),
	ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB),
	ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE),
	ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE),
	ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB),
	ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL),
	ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL),
	ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB),
	ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB),
	ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS),
	ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE),
	ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS),
	ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK),
	ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK),
	ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD),
	ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD),
	ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD),
	ARMV8_EVENT_ATTR(l1d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD),
	ARMV8_EVENT_ATTR(op_retired, ARMV8_PMUV3_PERFCTR_OP_RETIRED),
	ARMV8_EVENT_ATTR(op_spec, ARMV8_PMUV3_PERFCTR_OP_SPEC),
	ARMV8_EVENT_ATTR(stall, ARMV8_PMUV3_PERFCTR_STALL),
	ARMV8_EVENT_ATTR(stall_slot_backend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND),
	ARMV8_EVENT_ATTR(stall_slot_frontend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND),
	ARMV8_EVENT_ATTR(stall_slot, ARMV8_PMUV3_PERFCTR_STALL_SLOT),
	ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP),
	ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED),
	ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE),
	ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION),
	ARMV8_EVENT_ATTR(cnt_cycles, ARMV8_AMU_PERFCTR_CNT_CYCLES),
	ARMV8_EVENT_ATTR(stall_backend_mem, ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM),
	ARMV8_EVENT_ATTR(l1i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS),
	ARMV8_EVENT_ATTR(l2d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD),
	ARMV8_EVENT_ATTR(l2i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS),
	ARMV8_EVENT_ATTR(l3d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD),
	ARMV8_EVENT_ATTR(ldst_align_lat, ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT),
	ARMV8_EVENT_ATTR(ld_align_lat, ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT),
	ARMV8_EVENT_ATTR(st_align_lat, ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT),
	ARMV8_EVENT_ATTR(mem_access_checked, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED),
	ARMV8_EVENT_ATTR(mem_access_checked_rd, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD),
	ARMV8_EVENT_ATTR(mem_access_checked_wr, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR),
	NULL,
};

static umode_t
armv8pmu_event_attr_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
	    test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
		return attr->mode;

	if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) {
		u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;

		if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
		    test_bit(id, cpu_pmu->pmceid_ext_bitmap))
			return attr->mode;
	}

	return 0;
}

static struct attribute_group armv8_pmuv3_events_attr_group = {
	.name = "events",
	.attrs = armv8_pmuv3_event_attrs,
	.is_visible = armv8pmu_event_attr_is_visible,
};

PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(long, "config1:0");

static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
{
	return event->attr.config1 & 0x1;
}
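
/*
 * For illustration: userspace opts in to a 64-bit ("long") counter via the
 * "long" format field exposed above, e.g.
 * "perf stat -e <pmu>/event=0x11,long/" (the PMU instance name varies by
 * implementation), which arrives here as bit 0 of attr.config1.
 */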

static struct attribute *armv8_pmuv3_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_long.attr,
	NULL,
};

static struct attribute_group armv8_pmuv3_format_attr_group = {
	.name = "format",
	.attrs = armv8_pmuv3_format_attrs,
};

static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
			  char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;

	return sysfs_emit(page, "0x%08x\n", slots);
}

static DEVICE_ATTR_RO(slots);

static struct attribute *armv8_pmuv3_caps_attrs[] = {
	&dev_attr_slots.attr,
	NULL,
};

static struct attribute_group armv8_pmuv3_caps_attr_group = {
	.name = "caps",
	.attrs = armv8_pmuv3_caps_attrs,
};

/*
 * Perf Events' indices
 */
#define	ARMV8_IDX_CYCLE_COUNTER	0
#define	ARMV8_IDX_COUNTER0	1


/*
 * We unconditionally enable ARMv8.5-PMU long event counter support
 * (64-bit events) where supported. Indicate if this arm_pmu has long
 * event counter support.
 */
static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
{
	return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5);
}

/*
 * We must chain two programmable counters for 64 bit events,
 * except when we have allocated the 64bit cycle counter (for CPU
 * cycles event). This must be called only when the event has
 * a counter allocated.
 */
static inline bool armv8pmu_event_is_chained(struct perf_event *event)
{
	int idx = event->hw.idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

	return !WARN_ON(idx < 0) &&
	       armv8pmu_event_is_64bit(event) &&
	       !armv8pmu_has_long_event(cpu_pmu) &&
	       (idx != ARMV8_IDX_CYCLE_COUNTER);
}

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define	ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
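
/*
 * For illustration: perf event index 1 (ARMV8_IDX_COUNTER0) maps to hardware
 * counter 0 (PMEVCNTR0_EL0), index 2 to counter 1, and so on. The cycle
 * counter (index 0) wraps to bit position 31, which matches the C bit used
 * for it in PMCNTENSET_EL0/PMINTENSET_EL1.
 */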

#define PMEVN_CASE(n, case_macro) \
	case n: case_macro(n); break

#define PMEVN_SWITCH(x, case_macro)				\
	do {							\
		switch (x) {					\
		PMEVN_CASE(0, case_macro);			\
		PMEVN_CASE(1, case_macro);			\
		PMEVN_CASE(2, case_macro);			\
		PMEVN_CASE(3, case_macro);			\
		PMEVN_CASE(4, case_macro);			\
		PMEVN_CASE(5, case_macro);			\
		PMEVN_CASE(6, case_macro);			\
		PMEVN_CASE(7, case_macro);			\
		PMEVN_CASE(8, case_macro);			\
		PMEVN_CASE(9, case_macro);			\
		PMEVN_CASE(10, case_macro);			\
		PMEVN_CASE(11, case_macro);			\
		PMEVN_CASE(12, case_macro);			\
		PMEVN_CASE(13, case_macro);			\
		PMEVN_CASE(14, case_macro);			\
		PMEVN_CASE(15, case_macro);			\
		PMEVN_CASE(16, case_macro);			\
		PMEVN_CASE(17, case_macro);			\
		PMEVN_CASE(18, case_macro);			\
		PMEVN_CASE(19, case_macro);			\
		PMEVN_CASE(20, case_macro);			\
		PMEVN_CASE(21, case_macro);			\
		PMEVN_CASE(22, case_macro);			\
		PMEVN_CASE(23, case_macro);			\
		PMEVN_CASE(24, case_macro);			\
		PMEVN_CASE(25, case_macro);			\
		PMEVN_CASE(26, case_macro);			\
		PMEVN_CASE(27, case_macro);			\
		PMEVN_CASE(28, case_macro);			\
		PMEVN_CASE(29, case_macro);			\
		PMEVN_CASE(30, case_macro);			\
		default: WARN(1, "Invalid PMEV* index\n");	\
		}						\
	} while (0)
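
/*
 * The switch above exists because system register names must be known at
 * compile time: there is no instruction that indexes PMEVCNTR<n>_EL0 by a
 * runtime value, so each case expands case_macro with a constant n and the
 * right register name is encoded into the resulting mrs/msr instruction.
 */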

#define RETURN_READ_PMEVCNTRN(n) \
	return read_sysreg(pmevcntr##n##_el0)
static unsigned long read_pmevcntrn(int n)
{
	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
	return 0;
}

#define WRITE_PMEVCNTRN(n) \
	write_sysreg(val, pmevcntr##n##_el0)
static void write_pmevcntrn(int n, unsigned long val)
{
	PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
}

#define WRITE_PMEVTYPERN(n) \
	write_sysreg(val, pmevtyper##n##_el0)
static void write_pmevtypern(int n, unsigned long val)
{
	PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
}

static inline u32 armv8pmu_pmcr_read(void)
{
	return read_sysreg(pmcr_el0);
}

static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMU_PMCR_MASK;
	isb();
	write_sysreg(val, pmcr_el0);
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}

static inline u64 armv8pmu_read_evcntr(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	return read_pmevcntrn(counter);
}

static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
{
	int idx = event->hw.idx;
	u64 val = 0;

	val = armv8pmu_read_evcntr(idx);
	if (armv8pmu_event_is_chained(event))
		val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
	return val;
}
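
/*
 * For illustration: a chained event occupies two adjacent counters, with the
 * high 32 bits in the counter selected by idx and the low 32 bits at
 * idx - 1; e.g. idx = 4 yields (PMEVCNTR3_EL0 << 32) | PMEVCNTR2_EL0 after
 * the index-to-counter translation.
 */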

/*
 * The cycle counter is always a 64-bit counter. When ARMV8_PMU_PMCR_LP
 * is set, the event counters also become 64-bit counters. Unless the
 * user has requested a long counter (attr.config1), we want to
 * interrupt upon 32-bit overflow - we achieve this by applying a bias.
 */
static bool armv8pmu_event_needs_bias(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (armv8pmu_event_is_64bit(event))
		return false;

	if (armv8pmu_has_long_event(cpu_pmu) ||
	    idx == ARMV8_IDX_CYCLE_COUNTER)
		return true;

	return false;
}

static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value)
{
	if (armv8pmu_event_needs_bias(event))
		value |= GENMASK(63, 32);

	return value;
}

static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value)
{
	if (armv8pmu_event_needs_bias(event))
		value &= ~GENMASK(63, 32);

	return value;
}
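
/*
 * Worked example (for a biased 32-bit event): writing 0x100 stores
 * 0xffffffff00000100 in the hardware counter, so the 64-bit counter
 * overflows (and raises its interrupt) exactly when the low 32 bits
 * would have; reads mask the bias back off before handing the value
 * to perf.
 */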

static u64 armv8pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 value = 0;

	if (idx == ARMV8_IDX_CYCLE_COUNTER)
		value = read_sysreg(pmccntr_el0);
	else
		value = armv8pmu_read_hw_counter(event);

	return armv8pmu_unbias_long_counter(event, value);
}

static inline void armv8pmu_write_evcntr(int idx, u64 value)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	write_pmevcntrn(counter, value);
}

static inline void armv8pmu_write_hw_counter(struct perf_event *event,
					     u64 value)
{
	int idx = event->hw.idx;

	if (armv8pmu_event_is_chained(event)) {
		armv8pmu_write_evcntr(idx, upper_32_bits(value));
		armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
	} else {
		armv8pmu_write_evcntr(idx, value);
	}
}

static void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	value = armv8pmu_bias_long_counter(event, value);

	if (idx == ARMV8_IDX_CYCLE_COUNTER)
		write_sysreg(value, pmccntr_el0);
	else
		armv8pmu_write_hw_counter(event, value);
}

static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	val &= ARMV8_PMU_EVTYPE_MASK;
	write_pmevtypern(counter, val);
}

static inline void armv8pmu_write_event_type(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/*
	 * For chained events, the low counter is programmed to count
	 * the event of interest and the high counter is programmed
	 * with CHAIN event code with filters set to count at all ELs.
	 */
	if (armv8pmu_event_is_chained(event)) {
		u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
				ARMV8_PMU_INCLUDE_EL2;

		armv8pmu_write_evtype(idx - 1, hwc->config_base);
		armv8pmu_write_evtype(idx, chain_evt);
	} else {
		if (idx == ARMV8_IDX_CYCLE_COUNTER)
			write_sysreg(hwc->config_base, pmccfiltr_el0);
		else
			armv8pmu_write_evtype(idx, hwc->config_base);
	}
}

static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
{
	int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
	u32 mask = BIT(counter);

	if (armv8pmu_event_is_chained(event))
		mask |= BIT(counter - 1);
	return mask;
}
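
/*
 * For illustration: a chained event whose high half sits at perf idx 4
 * (hardware counter 3) produces the mask BIT(3) | BIT(2), so both halves
 * are always enabled and disabled together.
 */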

static inline void armv8pmu_enable_counter(u32 mask)
{
	/*
	 * Make sure event configuration register writes are visible before we
	 * enable the counter.
	 */
	isb();
	write_sysreg(mask, pmcntenset_el0);
}

static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u32 mask = armv8pmu_event_cnten_mask(event);

	kvm_set_pmu_events(mask, attr);

	/* We rely on the hypervisor switch code to enable guest counters */
	if (!kvm_pmu_counter_deferred(attr))
		armv8pmu_enable_counter(mask);
}

static inline void armv8pmu_disable_counter(u32 mask)
{
	write_sysreg(mask, pmcntenclr_el0);
	/*
	 * Make sure the effects of disabling the counter are visible before we
	 * start configuring the event.
	 */
	isb();
}

static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u32 mask = armv8pmu_event_cnten_mask(event);

	kvm_clr_pmu_events(mask);

	/* We rely on the hypervisor switch code to disable guest counters */
	if (!kvm_pmu_counter_deferred(attr))
		armv8pmu_disable_counter(mask);
}

static inline void armv8pmu_enable_intens(u32 mask)
{
	write_sysreg(mask, pmintenset_el1);
}

static inline void armv8pmu_enable_event_irq(struct perf_event *event)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
	armv8pmu_enable_intens(BIT(counter));
}

static inline void armv8pmu_disable_intens(u32 mask)
{
	write_sysreg(mask, pmintenclr_el1);
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	write_sysreg(mask, pmovsclr_el0);
	isb();
}

static inline void armv8pmu_disable_event_irq(struct perf_event *event)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
	armv8pmu_disable_intens(BIT(counter));
}

static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	value = read_sysreg(pmovsclr_el0);

	/* Write to clear flags */
	value &= ARMV8_PMU_OVSR_MASK;
	write_sysreg(value, pmovsclr_el0);

	return value;
}

static void armv8pmu_enable_event(struct perf_event *event)
{
	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */

	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Set event.
	 */
	armv8pmu_write_event_type(event);

	/*
	 * Enable interrupt for this counter
	 */
	armv8pmu_enable_event_irq(event);

	/*
	 * Enable counter
	 */
	armv8pmu_enable_event_counter(event);
}

static void armv8pmu_disable_event(struct perf_event *event)
{
	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Disable interrupt for this counter
	 */
	armv8pmu_disable_event_irq(event);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) static void armv8pmu_start(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) /* Enable all counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) /* Disable all counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
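^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * Overflow interrupt handling, in outline: read-and-clear the overflow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * flags and return IRQ_NONE if none of our counters fired (the line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * may be shared); otherwise stop the PMU so group siblings do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * drift while we account, then for each overflowed counter fold the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * hardware delta into the event count, program the next sample period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * and pass the sample to perf_event_overflow(), and finally restart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * the PMU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) */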
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) u32 pmovsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) struct perf_sample_data data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) struct pt_regs *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * Get and reset the IRQ flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) pmovsr = armv8pmu_getreset_flags();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * Did an overflow occur?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) if (!armv8pmu_has_overflowed(pmovsr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) * Handle the overflowed counter(s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) regs = get_irq_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * Stop the PMU while processing the counter overflows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * to prevent skews in group events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) armv8pmu_stop(cpu_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) struct perf_event *event = cpuc->events[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) struct hw_perf_event *hwc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) /* Ignore if we don't have an event. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (!event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * We have a single interrupt for all counters. Check that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * each counter has overflowed before we process it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) armpmu_event_update(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) perf_sample_data_init(&data, 0, hwc->last_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (!armpmu_event_set_period(event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * Perf event overflow will queue the processing of the event as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * an irq_work which will be taken care of in the handling of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * IPI_IRQ_WORK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (perf_event_overflow(event, &data, regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) cpu_pmu->disable(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) armv8pmu_start(cpu_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (!test_and_set_bit(idx, cpuc->used_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * Chaining requires two consecutive event counters, where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * the lower idx must be even.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (!test_and_set_bit(idx, cpuc->used_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) /* Check if the preceding even counter is available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (!test_and_set_bit(idx - 1, cpuc->used_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) /* Release the odd counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) clear_bit(idx, cpuc->used_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
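^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * A worked example of the index arithmetic above, assuming the usual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * fixed layout (ARMV8_IDX_CYCLE_COUNTER == 0, ARMV8_IDX_COUNTER0 == 1):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * the first candidate is idx 2, i.e. hardware counter 1, and its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * partner idx 1 is hardware counter 0. The even hardware counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * counts the event itself while the odd one is programmed with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * CHAIN event and counts overflows of its even neighbour, forming one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * logical 64-bit counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) */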
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) struct hw_perf_event *hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /* Always prefer to place a cycle counting event in the dedicated cycle counter. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return ARMV8_IDX_CYCLE_COUNTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * Otherwise use the programmable event counters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (armv8pmu_event_is_64bit(event) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) !armv8pmu_has_long_event(cpu_pmu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return armv8pmu_get_single_idx(cpuc, cpu_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) int idx = event->hw.idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) clear_bit(idx, cpuc->used_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (armv8pmu_event_is_chained(event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) clear_bit(idx - 1, cpuc->used_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * Add an event filter to a given event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) static int armv8pmu_set_event_filter(struct hw_perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) struct perf_event_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) unsigned long config_base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (attr->exclude_idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * If we're running in hyp mode, then we *are* the hypervisor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * Therefore we ignore exclude_hv in this configuration, since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * there's no hypervisor to sample anyway. This is consistent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * with other architectures (x86 and Power).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (is_kernel_in_hyp_mode()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (!attr->exclude_kernel && !attr->exclude_host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) config_base |= ARMV8_PMU_INCLUDE_EL2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (attr->exclude_guest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) config_base |= ARMV8_PMU_EXCLUDE_EL1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (attr->exclude_host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) config_base |= ARMV8_PMU_EXCLUDE_EL0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (!attr->exclude_hv && !attr->exclude_host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) config_base |= ARMV8_PMU_INCLUDE_EL2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * Filter out !VHE host kernels and guest kernels, both of which run at EL1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (attr->exclude_kernel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) config_base |= ARMV8_PMU_EXCLUDE_EL1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (attr->exclude_user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) config_base |= ARMV8_PMU_EXCLUDE_EL0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * Install the filter into config_base as this is used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * construct the event type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) event->config_base = config_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
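^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * For illustration, the common perf tool modifiers arrive here as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * attr bits (a sketch of the usual mapping, set up by the perf core):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) *   event:u  ->  attr.exclude_kernel == 1  ->  ARMV8_PMU_EXCLUDE_EL1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) *   event:k  ->  attr.exclude_user == 1    ->  ARMV8_PMU_EXCLUDE_EL0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * On a VHE host the kernel itself runs at EL2, which is why the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * is_kernel_in_hyp_mode() branch above folds the kernel/host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * exclusions into the EL2 filter bits instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) */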
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
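^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * ARMV8_PMUV3_PERFCTR_CHAIN (0x1E) is claimed internally to glue two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * adjacent 32-bit counters into one 64-bit counter, so a stand-alone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * CHAIN event opened as a raw event makes no sense on its own and is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * filtered here so it never gets scheduled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) */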
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) static int armv8pmu_filter_match(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) static void armv8pmu_reset(void *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) u32 pmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) /* The counter and interrupt enable registers are unknown at reset. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) armv8pmu_disable_counter(U32_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) armv8pmu_disable_intens(U32_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) /* Clear the counters we flip at guest entry/exit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) kvm_clr_pmu_events(U32_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * Initialize & Reset PMNC. Request a 64-bit overflow interrupt for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * cycle counter (PMCR_EL0.LC) but cheat in armv8pmu_write_counter().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) pmcr = ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_LC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) /* Enable long event counter support where available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (armv8pmu_has_long_event(cpu_pmu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) pmcr |= ARMV8_PMU_PMCR_LP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) armv8pmu_pmcr_write(pmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) static int __armv8_pmuv3_map_event(struct perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) const unsigned (*extra_event_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) [PERF_COUNT_HW_MAX],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) const unsigned (*extra_cache_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) [PERF_COUNT_HW_CACHE_MAX]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) [PERF_COUNT_HW_CACHE_OP_MAX]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) [PERF_COUNT_HW_CACHE_RESULT_MAX])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) int hw_event_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) &armv8_pmuv3_perf_cache_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) ARMV8_PMU_EVTYPE_EVENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (armv8pmu_event_is_64bit(event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) event->hw.flags |= ARMPMU_EVT_64BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) /* Only expose common events that this PMU advertises via PMCEID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) return hw_event_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return armpmu_map_event(event, extra_event_map, extra_cache_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) ARMV8_PMU_EVTYPE_EVENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
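^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * The mapping above is two-stage: the generic perf numbers are first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * translated through the architected PMUv3 tables, and the result is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * used directly only when it is a common event that the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * advertises in PMCEID. Anything else falls through to the optional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * CPU-specific tables, which may supply an IMPLEMENTATION DEFINED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * event encoding instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) */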
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) static int armv8_pmuv3_map_event(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return __armv8_pmuv3_map_event(event, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) static int armv8_a53_map_event(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) static int armv8_a57_map_event(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) static int armv8_a73_map_event(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static int armv8_thunder_map_event(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return __armv8_pmuv3_map_event(event, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) &armv8_thunder_perf_cache_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) static int armv8_vulcan_map_event(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return __armv8_pmuv3_map_event(event, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) &armv8_vulcan_perf_cache_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct armv8pmu_probe_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) struct arm_pmu *pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) bool present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) static void __armv8pmu_probe_pmu(void *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct armv8pmu_probe_info *probe = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) struct arm_pmu *cpu_pmu = probe->pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) u64 dfr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) u64 pmceid_raw[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) u32 pmceid[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) int pmuver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) dfr0 = read_sysreg(id_aa64dfr0_el1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) pmuver = cpuid_feature_extract_unsigned_field(dfr0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) ID_AA64DFR0_PMUVER_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (pmuver == ID_AA64DFR0_PMUVER_IMP_DEF || pmuver == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) cpu_pmu->pmuver = pmuver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) probe->present = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /* Read the number of CNTx counters supported from PMNC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) & ARMV8_PMU_PMCR_N_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /* Add the CPU cycles counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) cpu_pmu->num_events += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) pmceid[0] = pmceid_raw[0] >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) pmceid[1] = pmceid_raw[1] >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) /* Store PMMIR_EL1 so it can be exposed through the sysfs caps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (pmuver >= ID_AA64DFR0_PMUVER_8_4 && (pmceid_raw[1] & BIT(31)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) cpu_pmu->reg_pmmir = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
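^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * For reference, the PMCEID packing handled above: the low 32 bits of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * PMCEID0_EL0/PMCEID1_EL0 advertise the common events 0x0000-0x003F,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * while the high 32 bits advertise the extended range starting at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * 0x4000. So bit 0x11 set in pmceid_bitmap means event 0x0011
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * (CPU_CYCLES) is implemented, whereas the same bit in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * pmceid_ext_bitmap corresponds to event 0x4011.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) */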
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) struct armv8pmu_probe_info probe = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) .pmu = cpu_pmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) .present = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) ret = smp_call_function_any(&cpu_pmu->supported_cpus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) __armv8pmu_probe_pmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) &probe, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) return probe.present ? 0 : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) int (*map_event)(struct perf_event *event),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) const struct attribute_group *events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) const struct attribute_group *format,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) const struct attribute_group *caps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) int ret = armv8pmu_probe_pmu(cpu_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) cpu_pmu->handle_irq = armv8pmu_handle_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) cpu_pmu->enable = armv8pmu_enable_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) cpu_pmu->disable = armv8pmu_disable_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) cpu_pmu->read_counter = armv8pmu_read_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) cpu_pmu->write_counter = armv8pmu_write_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) cpu_pmu->get_event_idx = armv8pmu_get_event_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) cpu_pmu->start = armv8pmu_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) cpu_pmu->stop = armv8pmu_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) cpu_pmu->reset = armv8pmu_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) cpu_pmu->filter_match = armv8pmu_filter_match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) cpu_pmu->name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) cpu_pmu->map_event = map_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = events ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) events : &armv8_pmuv3_events_attr_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = format ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) format : &armv8_pmuv3_format_attr_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_CAPS] = caps ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) caps : &armv8_pmuv3_caps_attr_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static int armv8_pmu_init_nogroups(struct arm_pmu *cpu_pmu, char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) int (*map_event)(struct perf_event *event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) return armv8_pmu_init(cpu_pmu, name, map_event, NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return armv8_pmu_init_nogroups(cpu_pmu, "armv8_pmuv3",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) armv8_pmuv3_map_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static int armv8_a34_pmu_init(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a34",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) armv8_pmuv3_map_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a35",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) armv8_a53_map_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a53",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) armv8_a53_map_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) static int armv8_a55_pmu_init(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a55",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) armv8_pmuv3_map_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a57",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) armv8_a57_map_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) static int armv8_a65_pmu_init(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a65",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) armv8_pmuv3_map_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a72",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) armv8_a57_map_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a73",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) armv8_a73_map_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static int armv8_a75_pmu_init(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a75",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) armv8_pmuv3_map_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) static int armv8_a76_pmu_init(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a76",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) armv8_pmuv3_map_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) static int armv8_a77_pmu_init(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a77",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) armv8_pmuv3_map_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) static int armv8_e1_pmu_init(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_e1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) armv8_pmuv3_map_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) static int armv8_n1_pmu_init(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_n1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) armv8_pmuv3_map_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cavium_thunder",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) armv8_thunder_map_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) return armv8_pmu_init_nogroups(cpu_pmu, "armv8_brcm_vulcan",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) armv8_vulcan_map_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) static const struct of_device_id armv8_pmu_of_device_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {.compatible = "arm,cortex-a34-pmu", .data = armv8_a34_pmu_init},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) {.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) {.compatible = "arm,cortex-a55-pmu", .data = armv8_a55_pmu_init},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) {.compatible = "arm,cortex-a65-pmu", .data = armv8_a65_pmu_init},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) {.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) {.compatible = "arm,cortex-a75-pmu", .data = armv8_a75_pmu_init},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) {.compatible = "arm,cortex-a76-pmu", .data = armv8_a76_pmu_init},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {.compatible = "arm,cortex-a77-pmu", .data = armv8_a77_pmu_init},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) {.compatible = "arm,neoverse-e1-pmu", .data = armv8_e1_pmu_init},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {.compatible = "arm,neoverse-n1-pmu", .data = armv8_n1_pmu_init},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) {.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) {.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) };
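^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * A device tree node matched by the table above looks roughly like
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * this (a sketch only, not taken from a real platform):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) *   pmu {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) *       compatible = "arm,cortex-a53-pmu";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) *       interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) *   };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * The matched .data callback then names the PMU and installs its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * event maps before the core registers it with perf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) */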
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static int armv8_pmu_device_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) static struct platform_driver armv8_pmu_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) .name = ARMV8_PMU_PDEV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) .of_match_table = armv8_pmu_of_device_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) .suppress_bind_attrs = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) .probe = armv8_pmu_device_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
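^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * On ACPI systems the PMUs are discovered from the MADT via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * arm_pmu_acpi_probe() with the generic PMUv3 init; on DT systems the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * platform driver above binds against the compatible strings instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) */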
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) static int __init armv8_pmu_driver_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (acpi_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) return platform_driver_register(&armv8_pmu_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) return arm_pmu_acpi_probe(armv8_pmuv3_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) device_initcall(armv8_pmu_driver_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) void arch_perf_update_userpage(struct perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) struct perf_event_mmap_page *userpg, u64 now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) struct clock_read_data *rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) u64 ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) userpg->cap_user_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) userpg->cap_user_time_zero = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) userpg->cap_user_time_short = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) rd = sched_clock_read_begin(&seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if (rd->read_sched_clock != arch_timer_read_counter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) userpg->time_mult = rd->mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) userpg->time_shift = rd->shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) userpg->time_zero = rd->epoch_ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) userpg->time_cycles = rd->epoch_cyc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) userpg->time_mask = rd->sched_clock_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) * Subtract the cycle base, such that software that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * doesn't know about cap_user_time_short still 'works'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * assuming no wraps.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) userpg->time_zero -= ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) } while (sched_clock_read_retry(seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) userpg->time_offset = userpg->time_zero - now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * time_shift is not expected to be greater than 31 because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * originally published conversion algorithm shifted a 32-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * value (it now specifies a 64-bit one) - see the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * perf_event_mmap_page documentation in perf_event.h.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (userpg->time_shift == 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) userpg->time_shift = 31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) userpg->time_mult >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
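^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) * Halving the multiplier while reducing the shift by one preserves
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) * the scale factor: (cyc * mult) >> 32 == (cyc * (mult >> 1)) >> 31,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) * up to the truncation of an odd mult, which is acceptable here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) */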
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) * Internal timekeeping for enabled/running/stopped times
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * is always computed with the sched_clock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) userpg->cap_user_time = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) userpg->cap_user_time_zero = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) userpg->cap_user_time_short = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
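^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * With the capability bits above set, userspace can convert raw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * counter cycles to sched_clock nanoseconds without a syscall. A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * minimal sketch of the documented algorithm, assuming some raw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * counter read (the seqcount dance on userpg->lock is omitted; see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * the perf_event_mmap_page comments in include/uapi/linux/perf_event.h):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) *   u64 cyc = read_counter();            (hypothetical raw read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) *   cyc = userpg->time_cycles +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) *         ((cyc - userpg->time_cycles) & userpg->time_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) *   u64 rem = cyc & (((u64)1 << userpg->time_shift) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) *   u64 ns  = userpg->time_zero +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) *             (cyc >> userpg->time_shift) * userpg->time_mult +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) *             ((rem * userpg->time_mult) >> userpg->time_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) */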