// SPDX-License-Identifier: GPL-2.0

/*
 * This driver adds support for perf events to use the Performance
 * Monitor Counter Groups (PMCG) associated with an SMMUv3 node
 * to monitor that node.
 *
 * SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
 * <phys_addr_page> is the physical page address of the SMMU PMCG, i.e. the
 * physical address shifted right by 12 bits (4K page). For example, the
 * PMCG at 0xff88840000 is named smmuv3_pmcg_ff88840.
 *
 * Filtering by stream id is done by specifying filtering parameters
 * with the event. Options are:
 *   filter_enable    - 0 = no filtering, 1 = filtering enabled
 *   filter_span      - 0 = exact match, 1 = pattern match
 *   filter_stream_id - pattern to filter against
 *
 * To match a partial StreamID where the X most-significant bits must match
 * but the Y least-significant bits might differ, STREAMID is programmed
 * with a value that contains:
 *   STREAMID[Y - 1] == 0.
 *   STREAMID[Y - 2:0] == 1 (where Y > 1).
 * The remainder of implemented bits of STREAMID (X bits, from bit Y upwards)
 * contain a value to match from the corresponding bits of event StreamID.
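 *
 * For instance, with filter_span = 1, a filter_stream_id of 0x41 has
 * bit[0] == 1 and bit[1] == 0, i.e. Y = 2, so StreamIDs 0x40-0x43 all
 * match; a pattern with bit[0] == 0 gives Y = 1 and matches a pair of
 * StreamIDs, as in the perf example below.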
 *
 * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
 * filter_span=1,filter_stream_id=0x42/ -a netperf
 * Applies filter pattern 0x42 to transaction events, which means events
 * matching stream ids 0x42 and 0x43 are counted. Further filtering
 * information is available in the SMMU documentation.
 *
 * SMMU events are not attributable to a CPU, so task mode and sampling
 * are not supported.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#define SMMU_PMCG_EVCNTR0 0x0
#define SMMU_PMCG_EVCNTR(n, stride) (SMMU_PMCG_EVCNTR0 + (n) * (stride))
#define SMMU_PMCG_EVTYPER0 0x400
#define SMMU_PMCG_EVTYPER(n) (SMMU_PMCG_EVTYPER0 + (n) * 4)
#define SMMU_PMCG_SID_SPAN_SHIFT 29
#define SMMU_PMCG_SMR0 0xA00
#define SMMU_PMCG_SMR(n) (SMMU_PMCG_SMR0 + (n) * 4)
#define SMMU_PMCG_CNTENSET0 0xC00
#define SMMU_PMCG_CNTENCLR0 0xC20
#define SMMU_PMCG_INTENSET0 0xC40
#define SMMU_PMCG_INTENCLR0 0xC60
#define SMMU_PMCG_OVSCLR0 0xC80
#define SMMU_PMCG_OVSSET0 0xCC0
#define SMMU_PMCG_CFGR 0xE00
#define SMMU_PMCG_CFGR_SID_FILTER_TYPE BIT(23)
#define SMMU_PMCG_CFGR_MSI BIT(21)
#define SMMU_PMCG_CFGR_RELOC_CTRS BIT(20)
#define SMMU_PMCG_CFGR_SIZE GENMASK(13, 8)
#define SMMU_PMCG_CFGR_NCTR GENMASK(5, 0)
#define SMMU_PMCG_CR 0xE04
#define SMMU_PMCG_CR_ENABLE BIT(0)
#define SMMU_PMCG_CEID0 0xE20
#define SMMU_PMCG_CEID1 0xE28
#define SMMU_PMCG_IRQ_CTRL 0xE50
#define SMMU_PMCG_IRQ_CTRL_IRQEN BIT(0)
#define SMMU_PMCG_IRQ_CFG0 0xE58
#define SMMU_PMCG_IRQ_CFG1 0xE60
#define SMMU_PMCG_IRQ_CFG2 0xE64

/* MSI config fields */
#define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2)
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE 0x1

#define SMMU_PMCG_DEFAULT_FILTER_SPAN 1
#define SMMU_PMCG_DEFAULT_FILTER_SID GENMASK(31, 0)

#define SMMU_PMCG_MAX_COUNTERS 64
#define SMMU_PMCG_ARCH_MAX_EVENTS 128

#define SMMU_PMCG_PA_SHIFT 12

#define SMMU_PMCG_EVCNTR_RDONLY BIT(0)

static int cpuhp_state_num;

struct smmu_pmu {
	struct hlist_node node;
	struct perf_event *events[SMMU_PMCG_MAX_COUNTERS];
	DECLARE_BITMAP(used_counters, SMMU_PMCG_MAX_COUNTERS);
	DECLARE_BITMAP(supported_events, SMMU_PMCG_ARCH_MAX_EVENTS);
	unsigned int irq;
	unsigned int on_cpu;
	struct pmu pmu;
	unsigned int num_counters;
	struct device *dev;
	void __iomem *reg_base;
	void __iomem *reloc_base;
	u64 counter_mask;
	u32 options;
	bool global_filter;
};

#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))

#define SMMU_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end) \
	static inline u32 get_##_name(struct perf_event *event) \
	{ \
		return FIELD_GET(GENMASK_ULL(_end, _start), \
				 event->attr._config); \
	} \

SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33);

static inline void smmu_pmu_enable(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

	writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
	       smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
}

static inline void smmu_pmu_disable(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

	writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
	writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
}

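/*
 * Counters wider than 32 bits are accessed as 64-bit registers at an 8-byte
 * stride; otherwise the 32-bit registers at a 4-byte stride are used. The
 * BIT(32) test on counter_mask below selects between the two layouts.
 */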
static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
					      u32 idx, u64 value)
{
	if (smmu_pmu->counter_mask & BIT(32))
		writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
	else
		writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
}

static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
{
	u64 value;

	if (smmu_pmu->counter_mask & BIT(32))
		value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
	else
		value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));

	return value;
}

static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0);
}

static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
}

static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0);
}

static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu,
					      u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
}

static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx,
					u32 val)
{
	writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
}

static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val)
{
	writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx));
}

static void smmu_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	u64 delta, prev, now;
	u32 idx = hwc->idx;

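	/*
	 * Read the hardware counter and fold the difference from prev_count
	 * into the event count; the cmpxchg loop retries if prev_count is
	 * updated concurrently.
	 */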
	do {
		prev = local64_read(&hwc->prev_count);
		now = smmu_pmu_counter_get_value(smmu_pmu, idx);
	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	/* handle overflow. */
	delta = now - prev;
	delta &= smmu_pmu->counter_mask;

	local64_add(delta, &event->count);
}

static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu,
				struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;
	u64 new;

	if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) {
		/*
		 * On platforms that require this quirk, if the counter starts
		 * at less than half the maximum value and wraps, the current
		 * logic of handling the overflow may not work. It is expected
		 * that those platforms will have the full 64 counter bits
		 * implemented so that such a possibility is remote
		 * (e.g. HiSilicon HIP08).
		 */
		new = smmu_pmu_counter_get_value(smmu_pmu, idx);
	} else {
		/*
		 * We limit the max period to half the max counter value,
		 * so that even in the case of extreme interrupt latency
		 * the counter will (hopefully) not wrap past its initial
		 * value.
		 */
		new = smmu_pmu->counter_mask >> 1;
		smmu_pmu_counter_set_value(smmu_pmu, idx, new);
	}

	local64_set(&hwc->prev_count, new);
}

static void smmu_pmu_set_event_filter(struct perf_event *event,
				      int idx, u32 span, u32 sid)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	u32 evtyper;

	evtyper = get_event(event) | span << SMMU_PMCG_SID_SPAN_SHIFT;
	smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
	smmu_pmu_set_smr(smmu_pmu, idx, sid);
}

static bool smmu_pmu_check_global_filter(struct perf_event *curr,
					 struct perf_event *new)
{
	if (get_filter_enable(new) != get_filter_enable(curr))
		return false;

	if (!get_filter_enable(new))
		return true;

	return get_filter_span(new) == get_filter_span(curr) &&
	       get_filter_stream_id(new) == get_filter_stream_id(curr);
}

static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
				       struct perf_event *event, int idx)
{
	u32 span, sid;
	unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters;
	bool filter_en = !!get_filter_enable(event);

	span = filter_en ? get_filter_span(event) :
			   SMMU_PMCG_DEFAULT_FILTER_SPAN;
	sid = filter_en ? get_filter_stream_id(event) :
			  SMMU_PMCG_DEFAULT_FILTER_SID;

	cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
	/*
	 * Per-counter filtering, or scheduling the first globally-filtered
	 * event into an empty PMU so idx == 0 and it works out equivalent.
	 */
	if (!smmu_pmu->global_filter || cur_idx == num_ctrs) {
		smmu_pmu_set_event_filter(event, idx, span, sid);
		return 0;
	}

	/* Otherwise, must match whatever's currently scheduled */
	if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) {
		smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event));
		return 0;
	}

	return -EAGAIN;
}

static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
				  struct perf_event *event)
{
	int idx, err;
	unsigned int num_ctrs = smmu_pmu->num_counters;

	idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
	if (idx == num_ctrs)
		/* The counters are all in use. */
		return -EAGAIN;

	err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx);
	if (err)
		return err;

	set_bit(idx, smmu_pmu->used_counters);

	return idx;
}

static bool smmu_pmu_events_compatible(struct perf_event *curr,
				       struct perf_event *new)
{
	if (new->pmu != curr->pmu)
		return false;

	if (to_smmu_pmu(new->pmu)->global_filter &&
	    !smmu_pmu_check_global_filter(curr, new))
		return false;

	return true;
}

/*
 * Implementation of abstract pmu functionality required by
 * the core perf events code.
 */

static int smmu_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct device *dev = smmu_pmu->dev;
	struct perf_event *sibling;
	int group_num_events = 1;
	u16 event_id;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (hwc->sample_period) {
		dev_dbg(dev, "Sampling not supported\n");
		return -EOPNOTSUPP;
	}

	if (event->cpu < 0) {
		dev_dbg(dev, "Per-task mode not supported\n");
		return -EOPNOTSUPP;
	}

	/* Verify specified event is supported on this PMU */
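	/*
	 * Event IDs at or above SMMU_PMCG_ARCH_MAX_EVENTS fall outside the
	 * range covered by the CEID registers, so only architected IDs are
	 * checked against supported_events here.
	 */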
	event_id = get_event(event);
	if (event_id < SMMU_PMCG_ARCH_MAX_EVENTS &&
	    (!test_bit(event_id, smmu_pmu->supported_events))) {
		dev_dbg(dev, "Invalid event %d for this PMU\n", event_id);
		return -EINVAL;
	}

	/* Don't allow groups with mixed PMUs, except for s/w events */
	if (!is_software_event(event->group_leader)) {
		if (!smmu_pmu_events_compatible(event->group_leader, event))
			return -EINVAL;

		if (++group_num_events > smmu_pmu->num_counters)
			return -EINVAL;
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (is_software_event(sibling))
			continue;

		if (!smmu_pmu_events_compatible(sibling, event))
			return -EINVAL;

		if (++group_num_events > smmu_pmu->num_counters)
			return -EINVAL;
	}

	hwc->idx = -1;

	/*
	 * Ensure all events are on the same cpu so all events are in the
	 * same cpu context, to avoid races on pmu_enable etc.
	 */
	event->cpu = smmu_pmu->on_cpu;

	return 0;
}

static void smmu_pmu_event_start(struct perf_event *event, int flags)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	hwc->state = 0;

	smmu_pmu_set_period(smmu_pmu, hwc);

	smmu_pmu_counter_enable(smmu_pmu, idx);
}

static void smmu_pmu_event_stop(struct perf_event *event, int flags)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	smmu_pmu_counter_disable(smmu_pmu, idx);
	/* As the counter gets updated on _start, ignore PERF_EF_UPDATE */
	smmu_pmu_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int smmu_pmu_event_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);

	idx = smmu_pmu_get_event_idx(smmu_pmu, event);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	smmu_pmu->events[idx] = event;
	local64_set(&hwc->prev_count, 0);

	smmu_pmu_interrupt_enable(smmu_pmu, idx);

	if (flags & PERF_EF_START)
		smmu_pmu_event_start(event, flags);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void smmu_pmu_event_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	int idx = hwc->idx;

	smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE);
	smmu_pmu_interrupt_disable(smmu_pmu, idx);
	smmu_pmu->events[idx] = NULL;
	clear_bit(idx, smmu_pmu->used_counters);

	perf_event_update_userpage(event);
}

static void smmu_pmu_event_read(struct perf_event *event)
{
	smmu_pmu_event_update(event);
}

/* cpumask */

static ssize_t smmu_pmu_cpumask_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));
}

static struct device_attribute smmu_pmu_cpumask_attr =
		__ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL);

static struct attribute *smmu_pmu_cpumask_attrs[] = {
	&smmu_pmu_cpumask_attr.attr,
	NULL
};

static struct attribute_group smmu_pmu_cpumask_group = {
	.attrs = smmu_pmu_cpumask_attrs,
};

/* Events */

static ssize_t smmu_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}

#define SMMU_EVENT_ATTR(name, config) \
	PMU_EVENT_ATTR(name, smmu_event_attr_##name, \
		       config, smmu_pmu_event_show)
SMMU_EVENT_ATTR(cycles, 0);
SMMU_EVENT_ATTR(transaction, 1);
SMMU_EVENT_ATTR(tlb_miss, 2);
SMMU_EVENT_ATTR(config_cache_miss, 3);
SMMU_EVENT_ATTR(trans_table_walk_access, 4);
SMMU_EVENT_ATTR(config_struct_access, 5);
SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6);
SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7);

static struct attribute *smmu_pmu_events[] = {
	&smmu_event_attr_cycles.attr.attr,
	&smmu_event_attr_transaction.attr.attr,
	&smmu_event_attr_tlb_miss.attr.attr,
	&smmu_event_attr_config_cache_miss.attr.attr,
	&smmu_event_attr_trans_table_walk_access.attr.attr,
	&smmu_event_attr_config_struct_access.attr.attr,
	&smmu_event_attr_pcie_ats_trans_rq.attr.attr,
	&smmu_event_attr_pcie_ats_trans_passed.attr.attr,
	NULL
};

static umode_t smmu_pmu_event_is_visible(struct kobject *kobj,
					 struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (test_bit(pmu_attr->id, smmu_pmu->supported_events))
		return attr->mode;

	return 0;
}

static struct attribute_group smmu_pmu_events_group = {
	.name = "events",
	.attrs = smmu_pmu_events,
	.is_visible = smmu_pmu_event_is_visible,
};

/* Formats */
PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(filter_stream_id, "config1:0-31");
PMU_FORMAT_ATTR(filter_span, "config1:32");
PMU_FORMAT_ATTR(filter_enable, "config1:33");

static struct attribute *smmu_pmu_formats[] = {
	&format_attr_event.attr,
	&format_attr_filter_stream_id.attr,
	&format_attr_filter_span.attr,
	&format_attr_filter_enable.attr,
	NULL
};

static struct attribute_group smmu_pmu_format_group = {
	.name = "format",
	.attrs = smmu_pmu_formats,
};

static const struct attribute_group *smmu_pmu_attr_grps[] = {
	&smmu_pmu_cpumask_group,
	&smmu_pmu_events_group,
	&smmu_pmu_format_group,
	NULL
};

/*
 * Generic device handlers
 */

static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct smmu_pmu *smmu_pmu;
	unsigned int target;

	smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
	if (cpu != smmu_pmu->on_cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
	smmu_pmu->on_cpu = target;
	WARN_ON(irq_set_affinity_hint(smmu_pmu->irq, cpumask_of(target)));

	return 0;
}

static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
{
	struct smmu_pmu *smmu_pmu = data;
	u64 ovsr;
	unsigned int idx;

	ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
	if (!ovsr)
		return IRQ_NONE;

	writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);

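	/* Update each overflowed counter and reprogram its period */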
	for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) {
		struct perf_event *event = smmu_pmu->events[idx];
		struct hw_perf_event *hwc;

		if (WARN_ON_ONCE(!event))
			continue;

		smmu_pmu_event_update(event);
		hwc = &event->hw;

		smmu_pmu_set_period(smmu_pmu, hwc);
	}

	return IRQ_HANDLED;
}

static void smmu_pmu_free_msis(void *data)
{
	struct device *dev = data;

	platform_msi_domain_free_irqs(dev);
}

static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct smmu_pmu *pmu = dev_get_drvdata(dev);

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK;

	writeq_relaxed(doorbell, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);
	writel_relaxed(msg->data, pmu->reg_base + SMMU_PMCG_IRQ_CFG1);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE,
		       pmu->reg_base + SMMU_PMCG_IRQ_CFG2);
}

static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
{
	struct msi_desc *desc;
	struct device *dev = pmu->dev;
	int ret;

	/* Clear MSI address reg */
	writeq_relaxed(0, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);

	/* MSI supported or not */
	if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI))
		return;

	ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	desc = first_msi_entry(dev);
	if (desc)
		pmu->irq = desc->irq;

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, smmu_pmu_free_msis, dev);
}

static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
{
	unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD;
	int irq, ret = -ENXIO;

	smmu_pmu_setup_msi(pmu);

	irq = pmu->irq;
	if (irq)
		ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq,
				       flags, "smmuv3-pmu", pmu);
	return ret;
}

static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
{
	u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0);

	smmu_pmu_disable(&smmu_pmu->pmu);

	/* Disable counters and interrupts, and clear pending overflows */
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
}

static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
{
	u32 model;

	model = *(u32 *)dev_get_platdata(smmu_pmu->dev);

	switch (model) {
	case IORT_SMMU_V3_PMCG_HISI_HIP08:
		/* HiSilicon Erratum 162001800 */
		smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY;
		break;
	}

	dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options);
}

static int smmu_pmu_probe(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu;
	struct resource *res_0;
	u32 cfgr, reg_size;
	u64 ceid_64[2];
	int irq, err;
	char *name;
	struct device *dev = &pdev->dev;

	smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
	if (!smmu_pmu)
		return -ENOMEM;

	smmu_pmu->dev = dev;
	platform_set_drvdata(pdev, smmu_pmu);

	smmu_pmu->pmu = (struct pmu) {
		.module = THIS_MODULE,
		.task_ctx_nr = perf_invalid_context,
		.pmu_enable = smmu_pmu_enable,
		.pmu_disable = smmu_pmu_disable,
		.event_init = smmu_pmu_event_init,
		.add = smmu_pmu_event_add,
		.del = smmu_pmu_event_del,
		.start = smmu_pmu_event_start,
		.stop = smmu_pmu_event_stop,
		.read = smmu_pmu_event_read,
		.attr_groups = smmu_pmu_attr_grps,
		.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
	};

	smmu_pmu->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res_0);
	if (IS_ERR(smmu_pmu->reg_base))
		return PTR_ERR(smmu_pmu->reg_base);

	cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);

	/* Determine if page 1 is present */
	if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
		smmu_pmu->reloc_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(smmu_pmu->reloc_base))
			return PTR_ERR(smmu_pmu->reloc_base);
	} else {
		smmu_pmu->reloc_base = smmu_pmu->reg_base;
	}

	irq = platform_get_irq_optional(pdev, 0);
	if (irq > 0)
		smmu_pmu->irq = irq;

	ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
	ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
	bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
			  SMMU_PMCG_ARCH_MAX_EVENTS);

	smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1;

	smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);

	reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE, cfgr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) smmu_pmu_reset(smmu_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) err = smmu_pmu_setup_irq(smmu_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) (res_0->start) >> SMMU_PMCG_PA_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (!name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
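/* Pick up any model-specific options/quirks described by firmware. */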
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) smmu_pmu_get_acpi_options(smmu_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) /* Pick one CPU to be the preferred one to use and steer the overflow interrupt to it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) smmu_pmu->on_cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) WARN_ON(irq_set_affinity_hint(smmu_pmu->irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) cpumask_of(smmu_pmu->on_cpu)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
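/*
 * Register with the CPU hotplug state machine so events migrate to
 * another CPU if the chosen one goes offline.
 */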
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) &smmu_pmu->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) err, &res_0->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) goto out_clear_affinity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) dev_err(dev, "Error %d registering PMU @%pa\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) err, &res_0->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) goto out_unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) &res_0->start, smmu_pmu->num_counters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) smmu_pmu->global_filter ? "Global(Counter0)" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) "Individual");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) out_unregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) out_clear_affinity:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) irq_set_affinity_hint(smmu_pmu->irq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) static int smmu_pmu_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) perf_pmu_unregister(&smmu_pmu->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) irq_set_affinity_hint(smmu_pmu->irq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
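/* Stop the counters on system shutdown so the hardware is left quiescent. */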
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) static void smmu_pmu_shutdown(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) smmu_pmu_disable(&smmu_pmu->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
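/*
 * Manual unbind via sysfs is suppressed; module unload still tears the
 * PMUs down through smmu_pmu_remove().
 */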
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) static struct platform_driver smmu_pmu_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) .name = "arm-smmu-v3-pmcg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) .suppress_bind_attrs = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) .probe = smmu_pmu_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) .remove = smmu_pmu_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) .shutdown = smmu_pmu_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
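/*
 * A single dynamic hotplug state is shared by all PMCG instances; each
 * probed device adds itself as an instance of that state.
 */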
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) static int __init arm_smmu_pmu_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) "perf/arm/pmcg:online",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) smmu_pmu_offline_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (cpuhp_state_num < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) return cpuhp_state_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return platform_driver_register(&smmu_pmu_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) module_init(arm_smmu_pmu_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) static void __exit arm_smmu_pmu_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) platform_driver_unregister(&smmu_pmu_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) cpuhp_remove_multi_state(cpuhp_state_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) module_exit(arm_smmu_pmu_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) MODULE_AUTHOR("Neil Leeder <nleeder@codeaurora.org>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) MODULE_LICENSE("GPL v2");