// SPDX-License-Identifier: GPL-2.0
/*
 * CAVIUM THUNDERX2 SoC PMU UNCORE
 * Copyright (C) 2018 Cavium Inc.
 * Author: Ganapatrao Kulkarni <gkulkarni@cavium.com>
 */

#include <linux/acpi.h>
#include <linux/cpuhotplug.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>

/*
 * Each ThunderX2 (TX2) socket has an L3C and a DMC UNCORE PMU device.
 * Each UNCORE PMU device consists of 4 independent programmable counters.
 * Counters are 32-bit and do not support an overflow interrupt;
 * they need to be sampled before overflow (i.e., every 2 seconds).
 */

#define TX2_PMU_DMC_L3C_MAX_COUNTERS 4
#define TX2_PMU_CCPI2_MAX_COUNTERS 8
#define TX2_PMU_MAX_COUNTERS TX2_PMU_CCPI2_MAX_COUNTERS


#define TX2_PMU_DMC_CHANNELS 8
#define TX2_PMU_L3_TILES 16

#define TX2_PMU_HRTIMER_INTERVAL (2 * NSEC_PER_SEC)
#define GET_EVENTID(ev, mask) ((ev->hw.config) & mask)
#define GET_COUNTERID(ev, mask) ((ev->hw.idx) & mask)
/*
 * 1 byte per counter (4 counters).
 * The event id is encoded in bits [5:1] of each byte.
 */
#define DMC_EVENT_CFG(idx, val) ((val) << (((idx) * 8) + 1))
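/*
 * For example: DMC_EVENT_CFG(2, 0x1f) expands to 0x1f << 17, i.e. the
 * event-id field mask for counter 2.
 */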

/* Counter select bits [3:0]; CCPI2 counters are indexed from 8 to 15. */
#define CCPI2_COUNTER_OFFSET 8

#define L3C_COUNTER_CTL 0xA8
#define L3C_COUNTER_DATA 0xAC
#define DMC_COUNTER_CTL 0x234
#define DMC_COUNTER_DATA 0x240

#define CCPI2_PERF_CTL 0x108
#define CCPI2_COUNTER_CTL 0x10C
#define CCPI2_COUNTER_SEL 0x12c
#define CCPI2_COUNTER_DATA_L 0x130
#define CCPI2_COUNTER_DATA_H 0x134

/* L3C event IDs */
#define L3_EVENT_READ_REQ 0xD
#define L3_EVENT_WRITEBACK_REQ 0xE
#define L3_EVENT_INV_N_WRITE_REQ 0xF
#define L3_EVENT_INV_REQ 0x10
#define L3_EVENT_EVICT_REQ 0x13
#define L3_EVENT_INV_N_WRITE_HIT 0x14
#define L3_EVENT_INV_HIT 0x15
#define L3_EVENT_READ_HIT 0x17
#define L3_EVENT_MAX 0x18

/* DMC event IDs */
#define DMC_EVENT_COUNT_CYCLES 0x1
#define DMC_EVENT_WRITE_TXNS 0xB
#define DMC_EVENT_DATA_TRANSFERS 0xD
#define DMC_EVENT_READ_TXNS 0xF
#define DMC_EVENT_MAX 0x10

/* CCPI2 event IDs */
#define CCPI2_EVENT_REQ_PKT_SENT 0x3D
#define CCPI2_EVENT_SNOOP_PKT_SENT 0x65
#define CCPI2_EVENT_DATA_PKT_SENT 0x105
#define CCPI2_EVENT_GIC_PKT_SENT 0x12D
#define CCPI2_EVENT_MAX 0x200

#define CCPI2_PERF_CTL_ENABLE BIT(0)
#define CCPI2_PERF_CTL_START BIT(1)
#define CCPI2_PERF_CTL_RESET BIT(4)
#define CCPI2_EVENT_LEVEL_RISING_EDGE BIT(10)
#define CCPI2_EVENT_TYPE_EDGE_SENSITIVE BIT(11)

enum tx2_uncore_type {
        PMU_TYPE_L3C,
        PMU_TYPE_DMC,
        PMU_TYPE_CCPI2,
        PMU_TYPE_INVALID,
};

/*
 * Each socket has three uncore devices, each associated with a PMU: the DMC
 * and L3C have four 32-bit counters and the CCPI2 has eight 64-bit counters.
 */
struct tx2_uncore_pmu {
        struct hlist_node hpnode;
        struct list_head entry;
        struct pmu pmu;
        char *name;
        int node;
        int cpu;
        u32 max_counters;
        u32 counters_mask;
        u32 prorate_factor;
        u32 max_events;
        u32 events_mask;
        u64 hrtimer_interval;
        void __iomem *base;
        DECLARE_BITMAP(active_counters, TX2_PMU_MAX_COUNTERS);
        struct perf_event *events[TX2_PMU_MAX_COUNTERS];
        struct device *dev;
        struct hrtimer hrtimer;
        const struct attribute_group **attr_groups;
        enum tx2_uncore_type type;
        enum hrtimer_restart (*hrtimer_callback)(struct hrtimer *cb);
        void (*init_cntr_base)(struct perf_event *event,
                        struct tx2_uncore_pmu *tx2_pmu);
        void (*stop_event)(struct perf_event *event);
        void (*start_event)(struct perf_event *event, int flags);
};

static LIST_HEAD(tx2_pmus);

static inline struct tx2_uncore_pmu *pmu_to_tx2_pmu(struct pmu *pmu)
{
        return container_of(pmu, struct tx2_uncore_pmu, pmu);
}

#define TX2_PMU_FORMAT_ATTR(_var, _name, _format)                       \
static ssize_t                                                          \
__tx2_pmu_##_var##_show(struct device *dev,                             \
                        struct device_attribute *attr,                  \
                        char *page)                                     \
{                                                                       \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
        return sprintf(page, _format "\n");                             \
}                                                                       \
                                                                        \
static struct device_attribute format_attr_##_var =                    \
        __ATTR(_name, 0444, __tx2_pmu_##_var##_show, NULL)

TX2_PMU_FORMAT_ATTR(event, event, "config:0-4");
TX2_PMU_FORMAT_ATTR(event_ccpi2, event, "config:0-9");
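
/*
 * These are exposed under sysfs, e.g. (illustrative path):
 * /sys/bus/event_source/devices/uncore_l3c_0/format/event -> "config:0-4"
 */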

static struct attribute *l3c_pmu_format_attrs[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute *dmc_pmu_format_attrs[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute *ccpi2_pmu_format_attrs[] = {
        &format_attr_event_ccpi2.attr,
        NULL,
};

static const struct attribute_group l3c_pmu_format_attr_group = {
        .name = "format",
        .attrs = l3c_pmu_format_attrs,
};

static const struct attribute_group dmc_pmu_format_attr_group = {
        .name = "format",
        .attrs = dmc_pmu_format_attrs,
};

static const struct attribute_group ccpi2_pmu_format_attr_group = {
        .name = "format",
        .attrs = ccpi2_pmu_format_attrs,
};

/*
 * sysfs event attributes
 */
static ssize_t tx2_pmu_event_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct dev_ext_attribute *eattr;

        eattr = container_of(attr, struct dev_ext_attribute, attr);
        return sprintf(buf, "event=0x%lx\n", (unsigned long) eattr->var);
}

#define TX2_EVENT_ATTR(name, config)                    \
        PMU_EVENT_ATTR(name, tx2_pmu_event_attr_##name, \
                        config, tx2_pmu_event_show)

TX2_EVENT_ATTR(read_request, L3_EVENT_READ_REQ);
TX2_EVENT_ATTR(writeback_request, L3_EVENT_WRITEBACK_REQ);
TX2_EVENT_ATTR(inv_nwrite_request, L3_EVENT_INV_N_WRITE_REQ);
TX2_EVENT_ATTR(inv_request, L3_EVENT_INV_REQ);
TX2_EVENT_ATTR(evict_request, L3_EVENT_EVICT_REQ);
TX2_EVENT_ATTR(inv_nwrite_hit, L3_EVENT_INV_N_WRITE_HIT);
TX2_EVENT_ATTR(inv_hit, L3_EVENT_INV_HIT);
TX2_EVENT_ATTR(read_hit, L3_EVENT_READ_HIT);
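
/* Read via sysfs, each prints its encoding, e.g. "event=0xd" for read_request. */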

static struct attribute *l3c_pmu_events_attrs[] = {
        &tx2_pmu_event_attr_read_request.attr.attr,
        &tx2_pmu_event_attr_writeback_request.attr.attr,
        &tx2_pmu_event_attr_inv_nwrite_request.attr.attr,
        &tx2_pmu_event_attr_inv_request.attr.attr,
        &tx2_pmu_event_attr_evict_request.attr.attr,
        &tx2_pmu_event_attr_inv_nwrite_hit.attr.attr,
        &tx2_pmu_event_attr_inv_hit.attr.attr,
        &tx2_pmu_event_attr_read_hit.attr.attr,
        NULL,
};

TX2_EVENT_ATTR(cnt_cycles, DMC_EVENT_COUNT_CYCLES);
TX2_EVENT_ATTR(write_txns, DMC_EVENT_WRITE_TXNS);
TX2_EVENT_ATTR(data_transfers, DMC_EVENT_DATA_TRANSFERS);
TX2_EVENT_ATTR(read_txns, DMC_EVENT_READ_TXNS);

static struct attribute *dmc_pmu_events_attrs[] = {
        &tx2_pmu_event_attr_cnt_cycles.attr.attr,
        &tx2_pmu_event_attr_write_txns.attr.attr,
        &tx2_pmu_event_attr_data_transfers.attr.attr,
        &tx2_pmu_event_attr_read_txns.attr.attr,
        NULL,
};

TX2_EVENT_ATTR(req_pktsent, CCPI2_EVENT_REQ_PKT_SENT);
TX2_EVENT_ATTR(snoop_pktsent, CCPI2_EVENT_SNOOP_PKT_SENT);
TX2_EVENT_ATTR(data_pktsent, CCPI2_EVENT_DATA_PKT_SENT);
TX2_EVENT_ATTR(gic_pktsent, CCPI2_EVENT_GIC_PKT_SENT);

static struct attribute *ccpi2_pmu_events_attrs[] = {
        &tx2_pmu_event_attr_req_pktsent.attr.attr,
        &tx2_pmu_event_attr_snoop_pktsent.attr.attr,
        &tx2_pmu_event_attr_data_pktsent.attr.attr,
        &tx2_pmu_event_attr_gic_pktsent.attr.attr,
        NULL,
};

static const struct attribute_group l3c_pmu_events_attr_group = {
        .name = "events",
        .attrs = l3c_pmu_events_attrs,
};

static const struct attribute_group dmc_pmu_events_attr_group = {
        .name = "events",
        .attrs = dmc_pmu_events_attrs,
};

static const struct attribute_group ccpi2_pmu_events_attr_group = {
        .name = "events",
        .attrs = ccpi2_pmu_events_attrs,
};
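
/*
 * Illustrative userspace usage (the instance suffix is the NUMA node id,
 * e.g. uncore_l3c_0 on node 0):
 *   perf stat -a -e uncore_l3c_0/read_request/ sleep 1
 */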

/*
 * sysfs cpumask attributes
 */
static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        struct tx2_uncore_pmu *tx2_pmu;

        tx2_pmu = pmu_to_tx2_pmu(dev_get_drvdata(dev));
        return cpumap_print_to_pagebuf(true, buf, cpumask_of(tx2_pmu->cpu));
}
static DEVICE_ATTR_RO(cpumask);

static struct attribute *tx2_pmu_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static const struct attribute_group pmu_cpumask_attr_group = {
        .attrs = tx2_pmu_cpumask_attrs,
};

/*
 * Per PMU device attribute groups
 */
static const struct attribute_group *l3c_pmu_attr_groups[] = {
        &l3c_pmu_format_attr_group,
        &pmu_cpumask_attr_group,
        &l3c_pmu_events_attr_group,
        NULL
};

static const struct attribute_group *dmc_pmu_attr_groups[] = {
        &dmc_pmu_format_attr_group,
        &pmu_cpumask_attr_group,
        &dmc_pmu_events_attr_group,
        NULL
};

static const struct attribute_group *ccpi2_pmu_attr_groups[] = {
        &ccpi2_pmu_format_attr_group,
        &pmu_cpumask_attr_group,
        &ccpi2_pmu_events_attr_group,
        NULL
};

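/*
 * hwc->config_base and hwc->event_base hold __iomem addresses stashed in
 * an unsigned long; these helpers cast them back for MMIO access.
 */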
static inline u32 reg_readl(unsigned long addr)
{
        return readl((void __iomem *)addr);
}

static inline void reg_writel(u32 val, unsigned long addr)
{
        writel(val, (void __iomem *)addr);
}

static int alloc_counter(struct tx2_uncore_pmu *tx2_pmu)
{
        int counter;

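        /*
         * For example: with active_counters = 0b0011 and max_counters = 4,
         * find_first_zero_bit() returns 2 and that counter is marked busy.
         */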
        counter = find_first_zero_bit(tx2_pmu->active_counters,
                        tx2_pmu->max_counters);
        if (counter == tx2_pmu->max_counters)
                return -ENOSPC;

        set_bit(counter, tx2_pmu->active_counters);
        return counter;
}

static inline void free_counter(struct tx2_uncore_pmu *tx2_pmu, int counter)
{
        clear_bit(counter, tx2_pmu->active_counters);
}

static void init_cntr_base_l3c(struct perf_event *event,
                struct tx2_uncore_pmu *tx2_pmu)
{
        struct hw_perf_event *hwc = &event->hw;
        u32 cmask;

        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
        cmask = tx2_pmu->counters_mask;

        /* counter ctrl/data regs are spaced 8 bytes apart */
        hwc->config_base = (unsigned long)tx2_pmu->base
                + L3C_COUNTER_CTL + (8 * GET_COUNTERID(event, cmask));
        hwc->event_base = (unsigned long)tx2_pmu->base
                + L3C_COUNTER_DATA + (8 * GET_COUNTERID(event, cmask));
}
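
/*
 * For example, counter 2: ctl at base + 0xA8 + (8 * 2) = base + 0xB8 and
 * data at base + 0xAC + (8 * 2) = base + 0xBC.
 */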

static void init_cntr_base_dmc(struct perf_event *event,
                struct tx2_uncore_pmu *tx2_pmu)
{
        struct hw_perf_event *hwc = &event->hw;
        u32 cmask;

        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
        cmask = tx2_pmu->counters_mask;

        hwc->config_base = (unsigned long)tx2_pmu->base
                + DMC_COUNTER_CTL;
        /* counter data regs are spaced 0xc bytes apart */
        hwc->event_base = (unsigned long)tx2_pmu->base
                + DMC_COUNTER_DATA + (0xc * GET_COUNTERID(event, cmask));
}
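
/*
 * All DMC counters share the control register at 0x234; counter 1's data
 * register, for example, lands at 0x240 + 0xc = 0x24c.
 */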

static void init_cntr_base_ccpi2(struct perf_event *event,
                struct tx2_uncore_pmu *tx2_pmu)
{
        struct hw_perf_event *hwc = &event->hw;
        u32 cmask;

        cmask = tx2_pmu->counters_mask;

        hwc->config_base = (unsigned long)tx2_pmu->base
                + CCPI2_COUNTER_CTL + (4 * GET_COUNTERID(event, cmask));
        hwc->event_base = (unsigned long)tx2_pmu->base;
}
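
/*
 * event_base stays at the PMU base here because CCPI2 counter data is read
 * indirectly through CCPI2_COUNTER_SEL/CCPI2_COUNTER_DATA_{L,H}; see
 * tx2_uncore_event_update().
 */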

static void uncore_start_event_l3c(struct perf_event *event, int flags)
{
        u32 val, emask;
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu;

        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
        emask = tx2_pmu->events_mask;

        /* event id encoded in bits [07:03] */
        val = GET_EVENTID(event, emask) << 3;
        reg_writel(val, hwc->config_base);
        local64_set(&hwc->prev_count, 0);
        reg_writel(0, hwc->event_base);
}

static inline void uncore_stop_event_l3c(struct perf_event *event)
{
        reg_writel(0, event->hw.config_base);
}

static void uncore_start_event_dmc(struct perf_event *event, int flags)
{
        u32 val, cmask, emask;
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu;
        int idx, event_id;

        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
        cmask = tx2_pmu->counters_mask;
        emask = tx2_pmu->events_mask;

        idx = GET_COUNTERID(event, cmask);
        event_id = GET_EVENTID(event, emask);

        /*
         * Enable and start the counter: 8 bits per counter, with
         * bits [05:01] of each counter's byte selecting the event type.
         */
        val = reg_readl(hwc->config_base);
        val &= ~DMC_EVENT_CFG(idx, 0x1f);
        val |= DMC_EVENT_CFG(idx, event_id);
        reg_writel(val, hwc->config_base);
        local64_set(&hwc->prev_count, 0);
        reg_writel(0, hwc->event_base);
}
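
/*
 * Worked example: for idx = 1 and DMC_EVENT_WRITE_TXNS (0xB),
 * DMC_EVENT_CFG(1, 0x1f) clears bits [13:9] and
 * DMC_EVENT_CFG(1, 0xB) = 0xB << 9 = 0x1600 is OR'd back in.
 */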

static void uncore_stop_event_dmc(struct perf_event *event)
{
        u32 val, cmask;
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu;
        int idx;

        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
        cmask = tx2_pmu->counters_mask;
        idx = GET_COUNTERID(event, cmask);

        /* clear the event type (bits [05:01]) to stop the counter */
        val = reg_readl(hwc->config_base);
        val &= ~DMC_EVENT_CFG(idx, 0x1f);
        reg_writel(val, hwc->config_base);
}

static void uncore_start_event_ccpi2(struct perf_event *event, int flags)
{
        u32 emask;
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu;

        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
        emask = tx2_pmu->events_mask;

        /*
         * Bits [09:00] set the event id,
         * bit [10] sets the level to rising edge and
         * bit [11] sets the type to edge sensitive.
         */
        reg_writel((CCPI2_EVENT_TYPE_EDGE_SENSITIVE |
                        CCPI2_EVENT_LEVEL_RISING_EDGE |
                        GET_EVENTID(event, emask)), hwc->config_base);

        /* reset[4], enable[0] and start[1] the counters */
        reg_writel(CCPI2_PERF_CTL_RESET |
                        CCPI2_PERF_CTL_START |
                        CCPI2_PERF_CTL_ENABLE,
                        hwc->event_base + CCPI2_PERF_CTL);
        local64_set(&event->hw.prev_count, 0ULL);
}
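
/* The control-register write above is BIT(4) | BIT(1) | BIT(0) = 0x13. */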

static void uncore_stop_event_ccpi2(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        /* disable and stop the counter */
        reg_writel(0, hwc->event_base + CCPI2_PERF_CTL);
}

static void tx2_uncore_event_update(struct perf_event *event)
{
        u64 prev, delta, new = 0;
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu;
        enum tx2_uncore_type type;
        u32 prorate_factor;
        u32 cmask, emask;

        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
        type = tx2_pmu->type;
        cmask = tx2_pmu->counters_mask;
        emask = tx2_pmu->events_mask;
        prorate_factor = tx2_pmu->prorate_factor;
        if (type == PMU_TYPE_CCPI2) {
                reg_writel(CCPI2_COUNTER_OFFSET +
                                GET_COUNTERID(event, cmask),
                                hwc->event_base + CCPI2_COUNTER_SEL);
                new = reg_readl(hwc->event_base + CCPI2_COUNTER_DATA_H);
                new = (new << 32) +
                        reg_readl(hwc->event_base + CCPI2_COUNTER_DATA_L);
                prev = local64_xchg(&hwc->prev_count, new);
                delta = new - prev;
        } else {
                new = reg_readl(hwc->event_base);
                prev = local64_xchg(&hwc->prev_count, new);
                /* handle rollover of the 32-bit counter */
                delta = (u32)(((1UL << 32) - prev) + new);
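                /*
                 * e.g. prev = 0xfffffff0 and new = 0x10 give
                 * delta = (2^32 - 0xfffffff0) + 0x10 = 0x20.
                 */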
        }

        /* DMC event data_transfers granularity is 16 bytes; convert it to 64-byte units */
        if (type == PMU_TYPE_DMC &&
            GET_EVENTID(event, emask) == DMC_EVENT_DATA_TRANSFERS)
                delta = delta / 4;

        /*
         * The L3C and DMC have 16 and 8 interleaved channels respectively.
         * The sampled value is for channel 0 and is multiplied by
         * prorate_factor to get the count for the whole device.
         */
        local64_add(delta * prorate_factor, &event->count);
}

static enum tx2_uncore_type get_tx2_pmu_type(struct acpi_device *adev)
{
        int i = 0;
        struct acpi_tx2_pmu_device {
                __u8 id[ACPI_ID_LEN];
                enum tx2_uncore_type type;
        } devices[] = {
                {"CAV901D", PMU_TYPE_L3C},
                {"CAV901F", PMU_TYPE_DMC},
                {"CAV901E", PMU_TYPE_CCPI2},
                {"", PMU_TYPE_INVALID}
        };

        while (devices[i].type != PMU_TYPE_INVALID) {
                if (!strcmp(acpi_device_hid(adev), devices[i].id))
                        break;
                i++;
        }

        return devices[i].type;
}

static bool tx2_uncore_validate_event(struct pmu *pmu,
                                  struct perf_event *event, int *counters)
{
        if (is_software_event(event))
                return true;
        /* Reject groups spanning multiple HW PMUs. */
        if (event->pmu != pmu)
                return false;

        *counters = *counters + 1;
        return true;
}

/*
 * Make sure the group of events can be scheduled at once
 * on the PMU.
 */
static bool tx2_uncore_validate_event_group(struct perf_event *event,
                                  int max_counters)
{
        struct perf_event *sibling, *leader = event->group_leader;
        int counters = 0;

        if (event->group_leader == event)
                return true;

        if (!tx2_uncore_validate_event(event->pmu, leader, &counters))
                return false;

        for_each_sibling_event(sibling, leader) {
                if (!tx2_uncore_validate_event(event->pmu, sibling, &counters))
                        return false;
        }

        if (!tx2_uncore_validate_event(event->pmu, event, &counters))
                return false;

        /*
         * If the group requires more counters than the HW has,
         * it cannot ever be scheduled.
         */
        return counters <= max_counters;
}


static int tx2_uncore_event_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu;

        /* Test the event attr type for PMU enumeration */
        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /*
         * SoC PMU counters are shared across all cores.
         * Therefore, they support neither per-process mode
         * nor event sampling mode.
         */
        if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
                return -EINVAL;

        if (event->cpu < 0)
                return -EINVAL;

        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
        if (tx2_pmu->cpu >= nr_cpu_ids)
                return -EINVAL;
        event->cpu = tx2_pmu->cpu;

        if (event->attr.config >= tx2_pmu->max_events)
                return -EINVAL;

        /* store the event id */
        hwc->config = event->attr.config;

        /* Validate the group */
        if (!tx2_uncore_validate_event_group(event, tx2_pmu->max_counters))
                return -EINVAL;

        return 0;
}

static void tx2_uncore_event_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu;

        hwc->state = 0;
        tx2_pmu = pmu_to_tx2_pmu(event->pmu);

        tx2_pmu->start_event(event, flags);
        perf_event_update_userpage(event);

        /* No hrtimer needed for CCPI2, 64-bit counters */
        if (!tx2_pmu->hrtimer_callback)
                return;

        /* Start timer for first event */
        if (bitmap_weight(tx2_pmu->active_counters,
                                tx2_pmu->max_counters) == 1) {
                hrtimer_start(&tx2_pmu->hrtimer,
                        ns_to_ktime(tx2_pmu->hrtimer_interval),
                        HRTIMER_MODE_REL_PINNED);
        }
}

static void tx2_uncore_event_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu;

        if (hwc->state & PERF_HES_UPTODATE)
                return;

        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
        tx2_pmu->stop_event(event);
        WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
        hwc->state |= PERF_HES_STOPPED;
        if (flags & PERF_EF_UPDATE) {
                tx2_uncore_event_update(event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

static int tx2_uncore_event_add(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu;

        tx2_pmu = pmu_to_tx2_pmu(event->pmu);

        /* Allocate a free counter */
        hwc->idx = alloc_counter(tx2_pmu);
        if (hwc->idx < 0)
                return -EAGAIN;

        tx2_pmu->events[hwc->idx] = event;
        /* set counter control and data registers base address */
        tx2_pmu->init_cntr_base(event, tx2_pmu);

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (flags & PERF_EF_START)
                tx2_uncore_event_start(event, flags);

        return 0;
}

static void tx2_uncore_event_del(struct perf_event *event, int flags)
{
        struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u32 cmask;

        cmask = tx2_pmu->counters_mask;
        tx2_uncore_event_stop(event, PERF_EF_UPDATE);

        /* clear the assigned counter */
        free_counter(tx2_pmu, GET_COUNTERID(event, cmask));

        perf_event_update_userpage(event);
        tx2_pmu->events[hwc->idx] = NULL;
        hwc->idx = -1;

        if (!tx2_pmu->hrtimer_callback)
                return;

        if (bitmap_empty(tx2_pmu->active_counters, tx2_pmu->max_counters))
                hrtimer_cancel(&tx2_pmu->hrtimer);
}

static void tx2_uncore_event_read(struct perf_event *event)
{
        tx2_uncore_event_update(event);
}

static enum hrtimer_restart tx2_hrtimer_callback(struct hrtimer *timer)
{
        struct tx2_uncore_pmu *tx2_pmu;
        int max_counters, idx;

        tx2_pmu = container_of(timer, struct tx2_uncore_pmu, hrtimer);
        max_counters = tx2_pmu->max_counters;

        if (bitmap_empty(tx2_pmu->active_counters, max_counters))
                return HRTIMER_NORESTART;

        for_each_set_bit(idx, tx2_pmu->active_counters, max_counters) {
                struct perf_event *event = tx2_pmu->events[idx];

                tx2_uncore_event_update(event);
        }
        hrtimer_forward_now(timer, ns_to_ktime(tx2_pmu->hrtimer_interval));
        return HRTIMER_RESTART;
}
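
/*
 * Back-of-the-envelope check (assuming a worst-case event rate of ~2 GHz):
 * a 32-bit counter wraps after 2^32 events, i.e. in roughly 2.1 s, so the
 * 2-second hrtimer interval samples it just before it can overflow.
 */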

static int tx2_uncore_pmu_register(
                struct tx2_uncore_pmu *tx2_pmu)
{
        struct device *dev = tx2_pmu->dev;
        char *name = tx2_pmu->name;

        /* Perf event registration */
        tx2_pmu->pmu = (struct pmu) {
                .module         = THIS_MODULE,
                .attr_groups    = tx2_pmu->attr_groups,
                .task_ctx_nr    = perf_invalid_context,
                .event_init     = tx2_uncore_event_init,
                .add            = tx2_uncore_event_add,
                .del            = tx2_uncore_event_del,
                .start          = tx2_uncore_event_start,
                .stop           = tx2_uncore_event_stop,
                .read           = tx2_uncore_event_read,
                .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
        };

        tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL,
                        "%s", name);

        return perf_pmu_register(&tx2_pmu->pmu, tx2_pmu->pmu.name, -1);
}

static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
{
        int ret, cpu;

        cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node),
                        cpu_online_mask);

        tx2_pmu->cpu = cpu;

        if (tx2_pmu->hrtimer_callback) {
                hrtimer_init(&tx2_pmu->hrtimer,
                                CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                tx2_pmu->hrtimer.function = tx2_pmu->hrtimer_callback;
        }

        ret = tx2_uncore_pmu_register(tx2_pmu);
        if (ret) {
                dev_err(tx2_pmu->dev, "%s PMU: Failed to init driver\n",
                                tx2_pmu->name);
                return -ENODEV;
        }

        /* register hotplug callback for the pmu */
        ret = cpuhp_state_add_instance(
                        CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
                        &tx2_pmu->hpnode);
        if (ret) {
                dev_err(tx2_pmu->dev, "Error %d registering hotplug\n", ret);
                return ret;
        }

        /* Add to list */
        list_add(&tx2_pmu->entry, &tx2_pmus);

        dev_dbg(tx2_pmu->dev, "%s PMU UNCORE registered\n",
                        tx2_pmu->pmu.name);
        return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
		acpi_handle handle, struct acpi_device *adev, u32 type)
{
	struct tx2_uncore_pmu *tx2_pmu;
	void __iomem *base;
	struct resource res;
	struct resource_entry *rentry;
	struct list_head list;
	int ret;

	INIT_LIST_HEAD(&list);
	ret = acpi_dev_get_resources(adev, &list, NULL, NULL);
	if (ret <= 0) {
		dev_err(dev, "failed to parse _CRS method, error %d\n", ret);
		return NULL;
	}

	list_for_each_entry(rentry, &list, node) {
		if (resource_type(rentry->res) == IORESOURCE_MEM) {
			res = *rentry->res;
			rentry = NULL;
			break;
		}
	}
	acpi_dev_free_resource_list(&list);

	/* rentry was set to NULL above only if a memory resource was found. */
	if (rentry) {
		dev_err(dev, "PMU type %d: failed to find memory resource\n", type);
		return NULL;
	}

	base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(base)) {
		dev_err(dev, "PMU type %d: failed to map resource\n", type);
		return NULL;
	}

	tx2_pmu = devm_kzalloc(dev, sizeof(*tx2_pmu), GFP_KERNEL);
	if (!tx2_pmu)
		return NULL;

	tx2_pmu->dev = dev;
	tx2_pmu->type = type;
	tx2_pmu->base = base;
	tx2_pmu->node = dev_to_node(dev);
	INIT_LIST_HEAD(&tx2_pmu->entry);

	switch (tx2_pmu->type) {
	case PMU_TYPE_L3C:
		/* 4 counters: 2-bit counter id, 5-bit event id */
		tx2_pmu->max_counters = TX2_PMU_DMC_L3C_MAX_COUNTERS;
		tx2_pmu->counters_mask = 0x3;
		tx2_pmu->prorate_factor = TX2_PMU_L3_TILES;
		tx2_pmu->max_events = L3_EVENT_MAX;
		tx2_pmu->events_mask = 0x1f;
		tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
		tx2_pmu->hrtimer_callback = tx2_hrtimer_callback;
		tx2_pmu->attr_groups = l3c_pmu_attr_groups;
		tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
				"uncore_l3c_%d", tx2_pmu->node);
		tx2_pmu->init_cntr_base = init_cntr_base_l3c;
		tx2_pmu->start_event = uncore_start_event_l3c;
		tx2_pmu->stop_event = uncore_stop_event_l3c;
		break;
	case PMU_TYPE_DMC:
		/* 4 counters: 2-bit counter id, 5-bit event id */
		tx2_pmu->max_counters = TX2_PMU_DMC_L3C_MAX_COUNTERS;
		tx2_pmu->counters_mask = 0x3;
		tx2_pmu->prorate_factor = TX2_PMU_DMC_CHANNELS;
		tx2_pmu->max_events = DMC_EVENT_MAX;
		tx2_pmu->events_mask = 0x1f;
		tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
		tx2_pmu->hrtimer_callback = tx2_hrtimer_callback;
		tx2_pmu->attr_groups = dmc_pmu_attr_groups;
		tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
				"uncore_dmc_%d", tx2_pmu->node);
		tx2_pmu->init_cntr_base = init_cntr_base_dmc;
		tx2_pmu->start_event = uncore_start_event_dmc;
		tx2_pmu->stop_event = uncore_stop_event_dmc;
		break;
	case PMU_TYPE_CCPI2:
		/* CCPI2 has 8 counters: 3-bit counter id, 9-bit event id */
		tx2_pmu->max_counters = TX2_PMU_CCPI2_MAX_COUNTERS;
		tx2_pmu->counters_mask = 0x7;
		tx2_pmu->prorate_factor = 1;
		tx2_pmu->max_events = CCPI2_EVENT_MAX;
		tx2_pmu->events_mask = 0x1ff;
		tx2_pmu->attr_groups = ccpi2_pmu_attr_groups;
		tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
				"uncore_ccpi2_%d", tx2_pmu->node);
		tx2_pmu->init_cntr_base = init_cntr_base_ccpi2;
		tx2_pmu->start_event = uncore_start_event_ccpi2;
		tx2_pmu->stop_event = uncore_stop_event_ccpi2;
		/* CCPI2 counters are 64 bit (DATA_L/DATA_H), no hrtimer needed */
		tx2_pmu->hrtimer_callback = NULL;
		break;
	case PMU_TYPE_INVALID:
		devm_kfree(dev, tx2_pmu);
		return NULL;
	}

	return tx2_pmu;
}

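/*
 * acpi_walk_namespace() callback: for each present child device of a
 * recognized PMU type, initialize and register one uncore PMU.
 * Unhandled devices return AE_OK so the walk continues.
 */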
static acpi_status tx2_uncore_pmu_add(acpi_handle handle, u32 level,
		void *data, void **return_value)
{
	struct tx2_uncore_pmu *tx2_pmu;
	struct acpi_device *adev;
	enum tx2_uncore_type type;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	type = get_tx2_pmu_type(adev);
	if (type == PMU_TYPE_INVALID)
		return AE_OK;

	tx2_pmu = tx2_uncore_pmu_init_dev((struct device *)data,
			handle, adev, type);

	if (!tx2_pmu)
		return AE_ERROR;

	if (tx2_uncore_pmu_add_dev(tx2_pmu)) {
		/* Can't add the PMU device, abort */
		return AE_ERROR;
	}
	return AE_OK;
}

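/*
 * CPU hotplug online callback: adopt the incoming CPU for any PMU
 * instance on the same node that has no owning CPU yet.
 */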
static int tx2_uncore_pmu_online_cpu(unsigned int cpu,
		struct hlist_node *hpnode)
{
	struct tx2_uncore_pmu *tx2_pmu;

	tx2_pmu = hlist_entry_safe(hpnode,
			struct tx2_uncore_pmu, hpnode);

	/* Pick this CPU if the PMU has no owning CPU yet and
	 * both belong to the same node.
	 */
	if ((tx2_pmu->cpu >= nr_cpu_ids) &&
		(tx2_pmu->node == cpu_to_node(cpu)))
		tx2_pmu->cpu = cpu;

	return 0;
}

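/*
 * CPU hotplug offline callback: if the departing CPU owns this PMU,
 * cancel the sampling timer and migrate the perf context to another
 * online CPU on the same node, if one remains.
 */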
static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
		struct hlist_node *hpnode)
{
	int new_cpu;
	struct tx2_uncore_pmu *tx2_pmu;
	struct cpumask cpu_online_mask_temp;

	tx2_pmu = hlist_entry_safe(hpnode,
			struct tx2_uncore_pmu, hpnode);

	if (cpu != tx2_pmu->cpu)
		return 0;

	if (tx2_pmu->hrtimer_callback)
		hrtimer_cancel(&tx2_pmu->hrtimer);

	cpumask_copy(&cpu_online_mask_temp, cpu_online_mask);
	cpumask_clear_cpu(cpu, &cpu_online_mask_temp);
	new_cpu = cpumask_any_and(
			cpumask_of_node(tx2_pmu->node),
			&cpu_online_mask_temp);

	tx2_pmu->cpu = new_cpu;
	/* Nothing to migrate if no online CPU is left on this node. */
	if (new_cpu >= nr_cpu_ids)
		return 0;
	perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);

	return 0;
}

static const struct acpi_device_id tx2_uncore_acpi_match[] = {
	{"CAV901C", 0},
	{},
};
MODULE_DEVICE_TABLE(acpi, tx2_uncore_acpi_match);

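/*
 * Probe the socket-level container device and walk its ACPI children
 * to discover and register the L3C, DMC and CCPI2 uncore PMUs.
 */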
static int tx2_uncore_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	acpi_handle handle;
	acpi_status status;

	set_dev_node(dev, acpi_get_node(ACPI_HANDLE(dev)));

	if (!has_acpi_companion(dev))
		return -ENODEV;

	handle = ACPI_HANDLE(dev);
	if (!handle)
		return -EINVAL;

	/* Walk through the tree for all PMU UNCORE devices */
	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     tx2_uncore_pmu_add,
				     NULL, dev, NULL);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to probe PMU devices\n");
		return -ENODEV;
	}

	dev_info(dev, "node%d: pmu uncore registered\n", dev_to_node(dev));
	return 0;
}

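/*
 * Tear down every PMU instance that belongs to this device's node:
 * drop its hotplug instance, unregister it from perf and unlink it.
 */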
static int tx2_uncore_remove(struct platform_device *pdev)
{
	struct tx2_uncore_pmu *tx2_pmu, *temp;
	struct device *dev = &pdev->dev;

	list_for_each_entry_safe(tx2_pmu, temp, &tx2_pmus, entry) {
		if (tx2_pmu->node == dev_to_node(dev)) {
			cpuhp_state_remove_instance_nocalls(
				CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
				&tx2_pmu->hpnode);
			perf_pmu_unregister(&tx2_pmu->pmu);
			list_del(&tx2_pmu->entry);
		}
	}
	return 0;
}

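/*
 * suppress_bind_attrs disables manual unbind/bind through sysfs;
 * presumably this guards against tearing the PMUs down underneath
 * active perf events.
 */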
static struct platform_driver tx2_uncore_driver = {
	.driver = {
		.name = "tx2-uncore-pmu",
		.acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match),
		.suppress_bind_attrs = true,
	},
	.probe = tx2_uncore_probe,
	.remove = tx2_uncore_remove,
};

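/*
 * Set up the shared CPU hotplug state before registering the platform
 * driver, and unwind it again if registration fails.
 */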
static int __init tx2_uncore_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
				      "perf/tx2/uncore:online",
				      tx2_uncore_pmu_online_cpu,
				      tx2_uncore_pmu_offline_cpu);
	if (ret) {
		pr_err("TX2 PMU: hotplug setup failed (%d)\n", ret);
		return ret;
	}
	ret = platform_driver_register(&tx2_uncore_driver);
	if (ret)
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE);

	return ret;
}
module_init(tx2_uncore_driver_init);

static void __exit tx2_uncore_driver_exit(void)
{
	platform_driver_unregister(&tx2_uncore_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE);
}
module_exit(tx2_uncore_driver_exit);

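/*
 * Illustrative usage (a sketch; the exact event names come from this
 * driver's sysfs attr groups and the instance suffix is the NUMA node
 * id, see /sys/bus/event_source/devices/uncore_*):
 *
 *	# count DMC cycles system-wide on node 0 for two seconds
 *	perf stat -a -e uncore_dmc_0/cnt_cycles/ -- sleep 2
 */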
MODULE_DESCRIPTION("ThunderX2 UNCORE PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ganapatrao Kulkarni <gkulkarni@cavium.com>");