^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * APM X-Gene SoC PMU (Performance Monitor Unit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2016, Applied Micro Circuits Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Author: Hoan Tran <hotran@apm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Tai Nguyen <ttnguyen@apm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/acpi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/cpuhotplug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/cpumask.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/mfd/syscon.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/of_fdt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/of_platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/perf_event.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/regmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #define CSW_CSWCR 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #define CSW_CSWCR_DUALMCB_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define CSW_CSWCR_MCB0_ROUTING(x) (((x) & 0x0C) >> 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define CSW_CSWCR_MCB1_ROUTING(x) (((x) & 0x30) >> 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define MCBADDRMR 0x0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define MCBADDRMR_DUALMCU_MODE_MASK BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define PCPPMU_INTSTATUS_REG 0x000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define PCPPMU_INTMASK_REG 0x004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define PCPPMU_INTMASK 0x0000000F
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define PCPPMU_INTENMASK 0xFFFFFFFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define PCPPMU_INTCLRMASK 0xFFFFFFF0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define PCPPMU_INT_MCU BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define PCPPMU_INT_MCB BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define PCPPMU_INT_L3C BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define PCPPMU_INT_IOB BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define PCPPMU_V3_INTMASK 0x00FF33FF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define PCPPMU_V3_INTENMASK 0xFFFFFFFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define PCPPMU_V3_INTCLRMASK 0xFF00CC00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define PCPPMU_V3_INT_MCU 0x000000FF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define PCPPMU_V3_INT_MCB 0x00000300
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define PCPPMU_V3_INT_L3C 0x00FF0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define PCPPMU_V3_INT_IOB 0x00003000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define PMU_MAX_COUNTERS 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define PMU_CNT_MAX_PERIOD 0xFFFFFFFFULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define PMU_V3_CNT_MAX_PERIOD 0xFFFFFFFFFFFFFFFFULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define PMU_OVERFLOW_MASK 0xF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define PMU_PMCR_E BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define PMU_PMCR_P BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define PMU_PMEVCNTR0 0x000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define PMU_PMEVCNTR1 0x004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define PMU_PMEVCNTR2 0x008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define PMU_PMEVCNTR3 0x00C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define PMU_PMEVTYPER0 0x400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define PMU_PMEVTYPER1 0x404
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define PMU_PMEVTYPER2 0x408
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define PMU_PMEVTYPER3 0x40C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #define PMU_PMAMR0 0xA00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define PMU_PMAMR1 0xA04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define PMU_PMCNTENSET 0xC00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define PMU_PMCNTENCLR 0xC20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #define PMU_PMINTENSET 0xC40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #define PMU_PMINTENCLR 0xC60
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #define PMU_PMOVSR 0xC80
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #define PMU_PMCR 0xE04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) /* PMU registers for V3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #define PMU_PMOVSCLR 0xC80
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #define PMU_PMOVSSET 0xCC0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #define to_pmu_dev(p) container_of(p, struct xgene_pmu_dev, pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #define GET_CNTR(ev) (ev->hw.idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #define GET_EVENTID(ev) (ev->hw.config & 0xFFULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #define GET_AGENTID(ev) (ev->hw.config_base & 0xFFFFFFFFUL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #define GET_AGENT1ID(ev) ((ev->hw.config_base >> 32) & 0xFFFFFFFFUL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) struct hw_pmu_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) u32 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) u32 enable_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) void __iomem *csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) struct xgene_pmu_dev {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) struct hw_pmu_info *inf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) struct xgene_pmu *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) struct pmu pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) u8 max_counters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) DECLARE_BITMAP(cntr_assign_mask, PMU_MAX_COUNTERS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) u64 max_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) const struct attribute_group **attr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) struct perf_event *pmu_counter_event[PMU_MAX_COUNTERS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) struct xgene_pmu_ops {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) void (*mask_int)(struct xgene_pmu *pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) void (*unmask_int)(struct xgene_pmu *pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) u64 (*read_counter)(struct xgene_pmu_dev *pmu, int idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) void (*write_counter)(struct xgene_pmu_dev *pmu, int idx, u64 val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) void (*write_evttype)(struct xgene_pmu_dev *pmu_dev, int idx, u32 val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) void (*write_agentmsk)(struct xgene_pmu_dev *pmu_dev, u32 val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) void (*write_agent1msk)(struct xgene_pmu_dev *pmu_dev, u32 val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) void (*enable_counter)(struct xgene_pmu_dev *pmu_dev, int idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) void (*disable_counter)(struct xgene_pmu_dev *pmu_dev, int idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) void (*enable_counter_int)(struct xgene_pmu_dev *pmu_dev, int idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) void (*disable_counter_int)(struct xgene_pmu_dev *pmu_dev, int idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) void (*reset_counters)(struct xgene_pmu_dev *pmu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) void (*start_counters)(struct xgene_pmu_dev *pmu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) void (*stop_counters)(struct xgene_pmu_dev *pmu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) struct xgene_pmu {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) struct hlist_node node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) int version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) void __iomem *pcppmu_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) u32 mcb_active_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) u32 mc_active_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) u32 l3c_active_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) cpumask_t cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) raw_spinlock_t lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) const struct xgene_pmu_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) struct list_head l3cpmus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) struct list_head iobpmus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) struct list_head mcbpmus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) struct list_head mcpmus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) struct xgene_pmu_dev_ctx {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) struct list_head next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) struct xgene_pmu_dev *pmu_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) struct hw_pmu_info inf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) struct xgene_pmu_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) enum xgene_pmu_version {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) PCP_PMU_V1 = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) PCP_PMU_V2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) PCP_PMU_V3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) enum xgene_pmu_dev_type {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) PMU_TYPE_L3C = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) PMU_TYPE_IOB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) PMU_TYPE_IOB_SLOW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) PMU_TYPE_MCB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) PMU_TYPE_MC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) * sysfs format attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) static ssize_t xgene_pmu_format_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) struct dev_ext_attribute *eattr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) eattr = container_of(attr, struct dev_ext_attribute, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) return sprintf(buf, "%s\n", (char *) eattr->var);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) #define XGENE_PMU_FORMAT_ATTR(_name, _config) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) (&((struct dev_ext_attribute[]) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_format_show, NULL), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) .var = (void *) _config, } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) })[0].attr.attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) static struct attribute *l3c_pmu_format_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-7"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) XGENE_PMU_FORMAT_ATTR(l3c_agentid, "config1:0-9"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) static struct attribute *iob_pmu_format_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-7"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) XGENE_PMU_FORMAT_ATTR(iob_agentid, "config1:0-63"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) static struct attribute *mcb_pmu_format_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-5"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) XGENE_PMU_FORMAT_ATTR(mcb_agentid, "config1:0-9"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) static struct attribute *mc_pmu_format_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-28"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) static const struct attribute_group l3c_pmu_format_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) .name = "format",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) .attrs = l3c_pmu_format_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) static const struct attribute_group iob_pmu_format_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) .name = "format",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) .attrs = iob_pmu_format_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) static const struct attribute_group mcb_pmu_format_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) .name = "format",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) .attrs = mcb_pmu_format_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) static const struct attribute_group mc_pmu_format_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) .name = "format",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) .attrs = mc_pmu_format_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) static struct attribute *l3c_pmu_v3_format_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-39"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) static struct attribute *iob_pmu_v3_format_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-47"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) static struct attribute *iob_slow_pmu_v3_format_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) XGENE_PMU_FORMAT_ATTR(iob_slow_eventid, "config:0-16"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) static struct attribute *mcb_pmu_v3_format_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-35"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) static struct attribute *mc_pmu_v3_format_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-44"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) static const struct attribute_group l3c_pmu_v3_format_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) .name = "format",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) .attrs = l3c_pmu_v3_format_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) static const struct attribute_group iob_pmu_v3_format_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) .name = "format",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) .attrs = iob_pmu_v3_format_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) static const struct attribute_group iob_slow_pmu_v3_format_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) .name = "format",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) .attrs = iob_slow_pmu_v3_format_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) static const struct attribute_group mcb_pmu_v3_format_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) .name = "format",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) .attrs = mcb_pmu_v3_format_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) static const struct attribute_group mc_pmu_v3_format_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) .name = "format",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) .attrs = mc_pmu_v3_format_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) * sysfs event attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) static ssize_t xgene_pmu_event_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) struct dev_ext_attribute *eattr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) eattr = container_of(attr, struct dev_ext_attribute, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) return sprintf(buf, "config=0x%lx\n", (unsigned long) eattr->var);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) #define XGENE_PMU_EVENT_ATTR(_name, _config) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) (&((struct dev_ext_attribute[]) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_event_show, NULL), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) .var = (void *) _config, } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) })[0].attr.attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) static struct attribute *l3c_pmu_events_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) XGENE_PMU_EVENT_ATTR(read-hit, 0x02),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) XGENE_PMU_EVENT_ATTR(read-miss, 0x03),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) XGENE_PMU_EVENT_ATTR(write-need-replacement, 0x06),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) XGENE_PMU_EVENT_ATTR(write-not-need-replacement, 0x07),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) XGENE_PMU_EVENT_ATTR(tq-full, 0x08),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) XGENE_PMU_EVENT_ATTR(ackq-full, 0x09),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) XGENE_PMU_EVENT_ATTR(wdb-full, 0x0a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) XGENE_PMU_EVENT_ATTR(bank-fifo-full, 0x0b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) XGENE_PMU_EVENT_ATTR(odb-full, 0x0c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) XGENE_PMU_EVENT_ATTR(wbq-full, 0x0d),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) XGENE_PMU_EVENT_ATTR(bank-conflict-fifo-issue, 0x0e),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) XGENE_PMU_EVENT_ATTR(bank-fifo-issue, 0x0f),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) static struct attribute *iob_pmu_events_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) XGENE_PMU_EVENT_ATTR(axi0-read, 0x02),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) XGENE_PMU_EVENT_ATTR(axi0-read-partial, 0x03),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) XGENE_PMU_EVENT_ATTR(axi1-read, 0x04),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) XGENE_PMU_EVENT_ATTR(axi1-read-partial, 0x05),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) XGENE_PMU_EVENT_ATTR(csw-read-block, 0x06),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) XGENE_PMU_EVENT_ATTR(csw-read-partial, 0x07),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) XGENE_PMU_EVENT_ATTR(axi0-write, 0x10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) XGENE_PMU_EVENT_ATTR(axi0-write-partial, 0x11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) XGENE_PMU_EVENT_ATTR(axi1-write, 0x13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) XGENE_PMU_EVENT_ATTR(axi1-write-partial, 0x14),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) XGENE_PMU_EVENT_ATTR(csw-inbound-dirty, 0x16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) static struct attribute *mcb_pmu_events_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) XGENE_PMU_EVENT_ATTR(csw-read, 0x02),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) XGENE_PMU_EVENT_ATTR(csw-write-request, 0x03),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) XGENE_PMU_EVENT_ATTR(mcb-csw-stall, 0x04),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) XGENE_PMU_EVENT_ATTR(cancel-read-gack, 0x05),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) static struct attribute *mc_pmu_events_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) XGENE_PMU_EVENT_ATTR(act-cmd-sent, 0x02),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) XGENE_PMU_EVENT_ATTR(pre-cmd-sent, 0x03),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) XGENE_PMU_EVENT_ATTR(rd-cmd-sent, 0x04),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) XGENE_PMU_EVENT_ATTR(rda-cmd-sent, 0x05),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) XGENE_PMU_EVENT_ATTR(wr-cmd-sent, 0x06),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) XGENE_PMU_EVENT_ATTR(wra-cmd-sent, 0x07),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) XGENE_PMU_EVENT_ATTR(pde-cmd-sent, 0x08),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) XGENE_PMU_EVENT_ATTR(sre-cmd-sent, 0x09),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) XGENE_PMU_EVENT_ATTR(prea-cmd-sent, 0x0a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) XGENE_PMU_EVENT_ATTR(ref-cmd-sent, 0x0b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) XGENE_PMU_EVENT_ATTR(rd-rda-cmd-sent, 0x0c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) XGENE_PMU_EVENT_ATTR(wr-wra-cmd-sent, 0x0d),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) XGENE_PMU_EVENT_ATTR(in-rd-collision, 0x0e),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) XGENE_PMU_EVENT_ATTR(in-wr-collision, 0x0f),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) XGENE_PMU_EVENT_ATTR(collision-queue-not-empty, 0x10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) XGENE_PMU_EVENT_ATTR(collision-queue-full, 0x11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) XGENE_PMU_EVENT_ATTR(mcu-request, 0x12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) XGENE_PMU_EVENT_ATTR(mcu-rd-request, 0x13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) XGENE_PMU_EVENT_ATTR(mcu-hp-rd-request, 0x14),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) XGENE_PMU_EVENT_ATTR(mcu-wr-request, 0x15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-all, 0x16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-cancel, 0x17),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) XGENE_PMU_EVENT_ATTR(mcu-rd-response, 0x18),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-all, 0x19),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-cancel, 0x1a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-all, 0x1b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-cancel, 0x1c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) static const struct attribute_group l3c_pmu_events_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) .name = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) .attrs = l3c_pmu_events_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) static const struct attribute_group iob_pmu_events_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) .name = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) .attrs = iob_pmu_events_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) static const struct attribute_group mcb_pmu_events_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) .name = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) .attrs = mcb_pmu_events_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) static const struct attribute_group mc_pmu_events_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) .name = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) .attrs = mc_pmu_events_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) static struct attribute *l3c_pmu_v3_events_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) XGENE_PMU_EVENT_ATTR(read-hit, 0x01),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) XGENE_PMU_EVENT_ATTR(read-miss, 0x02),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) XGENE_PMU_EVENT_ATTR(index-flush-eviction, 0x03),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) XGENE_PMU_EVENT_ATTR(write-caused-replacement, 0x04),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) XGENE_PMU_EVENT_ATTR(write-not-caused-replacement, 0x05),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) XGENE_PMU_EVENT_ATTR(clean-eviction, 0x06),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) XGENE_PMU_EVENT_ATTR(dirty-eviction, 0x07),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) XGENE_PMU_EVENT_ATTR(read, 0x08),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) XGENE_PMU_EVENT_ATTR(write, 0x09),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) XGENE_PMU_EVENT_ATTR(request, 0x0a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) XGENE_PMU_EVENT_ATTR(tq-bank-conflict-issue-stall, 0x0b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) XGENE_PMU_EVENT_ATTR(tq-full, 0x0c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) XGENE_PMU_EVENT_ATTR(ackq-full, 0x0d),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) XGENE_PMU_EVENT_ATTR(wdb-full, 0x0e),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) XGENE_PMU_EVENT_ATTR(odb-full, 0x10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) XGENE_PMU_EVENT_ATTR(wbq-full, 0x11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) XGENE_PMU_EVENT_ATTR(input-req-async-fifo-stall, 0x12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) XGENE_PMU_EVENT_ATTR(output-req-async-fifo-stall, 0x13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) XGENE_PMU_EVENT_ATTR(output-data-async-fifo-stall, 0x14),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) XGENE_PMU_EVENT_ATTR(total-insertion, 0x15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) XGENE_PMU_EVENT_ATTR(sip-insertions-r-set, 0x16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) XGENE_PMU_EVENT_ATTR(sip-insertions-r-clear, 0x17),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) XGENE_PMU_EVENT_ATTR(dip-insertions-r-set, 0x18),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) XGENE_PMU_EVENT_ATTR(dip-insertions-r-clear, 0x19),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) XGENE_PMU_EVENT_ATTR(dip-insertions-force-r-set, 0x1a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) XGENE_PMU_EVENT_ATTR(egression, 0x1b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) XGENE_PMU_EVENT_ATTR(replacement, 0x1c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) XGENE_PMU_EVENT_ATTR(old-replacement, 0x1d),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) XGENE_PMU_EVENT_ATTR(young-replacement, 0x1e),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) XGENE_PMU_EVENT_ATTR(r-set-replacement, 0x1f),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) XGENE_PMU_EVENT_ATTR(r-clear-replacement, 0x20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) XGENE_PMU_EVENT_ATTR(old-r-replacement, 0x21),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) XGENE_PMU_EVENT_ATTR(old-nr-replacement, 0x22),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) XGENE_PMU_EVENT_ATTR(young-r-replacement, 0x23),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) XGENE_PMU_EVENT_ATTR(young-nr-replacement, 0x24),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) XGENE_PMU_EVENT_ATTR(bloomfilter-clearing, 0x25),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) XGENE_PMU_EVENT_ATTR(generation-flip, 0x26),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) XGENE_PMU_EVENT_ATTR(vcc-droop-detected, 0x27),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) static struct attribute *iob_fast_pmu_v3_events_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-all, 0x01),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-rd, 0x02),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-wr, 0x03),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) XGENE_PMU_EVENT_ATTR(pa-all-cp-req, 0x04),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) XGENE_PMU_EVENT_ATTR(pa-cp-blk-req, 0x05),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) XGENE_PMU_EVENT_ATTR(pa-cp-ptl-req, 0x06),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) XGENE_PMU_EVENT_ATTR(pa-cp-rd-req, 0x07),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) XGENE_PMU_EVENT_ATTR(pa-cp-wr-req, 0x08),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) XGENE_PMU_EVENT_ATTR(ba-all-req, 0x09),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) XGENE_PMU_EVENT_ATTR(ba-rd-req, 0x0a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) XGENE_PMU_EVENT_ATTR(ba-wr-req, 0x0b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) XGENE_PMU_EVENT_ATTR(pa-rd-shared-req-issued, 0x10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-req-issued, 0x11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) XGENE_PMU_EVENT_ATTR(pa-wr-invalidate-req-issued-stashable, 0x12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) XGENE_PMU_EVENT_ATTR(pa-wr-invalidate-req-issued-nonstashable, 0x13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) XGENE_PMU_EVENT_ATTR(pa-wr-back-req-issued-stashable, 0x14),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) XGENE_PMU_EVENT_ATTR(pa-wr-back-req-issued-nonstashable, 0x15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) XGENE_PMU_EVENT_ATTR(pa-ptl-wr-req, 0x16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) XGENE_PMU_EVENT_ATTR(pa-ptl-rd-req, 0x17),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) XGENE_PMU_EVENT_ATTR(pa-wr-back-clean-data, 0x18),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) XGENE_PMU_EVENT_ATTR(pa-wr-back-cancelled-on-SS, 0x1b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) XGENE_PMU_EVENT_ATTR(pa-barrier-occurrence, 0x1c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) XGENE_PMU_EVENT_ATTR(pa-barrier-cycles, 0x1d),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) XGENE_PMU_EVENT_ATTR(pa-total-cp-snoops, 0x20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) XGENE_PMU_EVENT_ATTR(pa-rd-shared-snoop, 0x21),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) XGENE_PMU_EVENT_ATTR(pa-rd-shared-snoop-hit, 0x22),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-snoop, 0x23),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-snoop-hit, 0x24),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) XGENE_PMU_EVENT_ATTR(pa-rd-wr-invalid-snoop, 0x25),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) XGENE_PMU_EVENT_ATTR(pa-rd-wr-invalid-snoop-hit, 0x26),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) XGENE_PMU_EVENT_ATTR(pa-req-buffer-full, 0x28),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) XGENE_PMU_EVENT_ATTR(cswlf-outbound-req-fifo-full, 0x29),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) XGENE_PMU_EVENT_ATTR(cswlf-inbound-snoop-fifo-backpressure, 0x2a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) XGENE_PMU_EVENT_ATTR(cswlf-outbound-lack-fifo-full, 0x2b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) XGENE_PMU_EVENT_ATTR(cswlf-inbound-gack-fifo-backpressure, 0x2c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) XGENE_PMU_EVENT_ATTR(cswlf-outbound-data-fifo-full, 0x2d),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) XGENE_PMU_EVENT_ATTR(cswlf-inbound-data-fifo-backpressure, 0x2e),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) XGENE_PMU_EVENT_ATTR(cswlf-inbound-req-backpressure, 0x2f),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) static struct attribute *iob_slow_pmu_v3_events_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) XGENE_PMU_EVENT_ATTR(pa-axi0-rd-req, 0x01),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) XGENE_PMU_EVENT_ATTR(pa-axi0-wr-req, 0x02),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) XGENE_PMU_EVENT_ATTR(pa-axi1-rd-req, 0x03),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) XGENE_PMU_EVENT_ATTR(pa-axi1-wr-req, 0x04),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) XGENE_PMU_EVENT_ATTR(ba-all-axi-req, 0x07),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) XGENE_PMU_EVENT_ATTR(ba-axi-rd-req, 0x08),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) XGENE_PMU_EVENT_ATTR(ba-axi-wr-req, 0x09),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) XGENE_PMU_EVENT_ATTR(ba-free-list-empty, 0x10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) static struct attribute *mcb_pmu_v3_events_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) XGENE_PMU_EVENT_ATTR(req-receive, 0x01),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) XGENE_PMU_EVENT_ATTR(rd-req-recv, 0x02),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) XGENE_PMU_EVENT_ATTR(rd-req-recv-2, 0x03),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) XGENE_PMU_EVENT_ATTR(wr-req-recv, 0x04),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) XGENE_PMU_EVENT_ATTR(wr-req-recv-2, 0x05),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu, 0x06),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu-2, 0x07),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu, 0x08),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu-2, 0x09),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) XGENE_PMU_EVENT_ATTR(glbl-ack-recv-for-rd-sent-to-spec-mcu, 0x0a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-for-rd-sent-to-spec-mcu, 0x0b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) XGENE_PMU_EVENT_ATTR(glbl-ack-nogo-recv-for-rd-sent-to-spec-mcu, 0x0c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-any-rd-req, 0x0d),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-any-rd-req-2, 0x0e),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) XGENE_PMU_EVENT_ATTR(wr-req-sent-to-mcu, 0x0f),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) XGENE_PMU_EVENT_ATTR(gack-recv, 0x10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) XGENE_PMU_EVENT_ATTR(rd-gack-recv, 0x11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) XGENE_PMU_EVENT_ATTR(wr-gack-recv, 0x12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) XGENE_PMU_EVENT_ATTR(cancel-rd-gack, 0x13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) XGENE_PMU_EVENT_ATTR(cancel-wr-gack, 0x14),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) XGENE_PMU_EVENT_ATTR(mcb-csw-req-stall, 0x15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) XGENE_PMU_EVENT_ATTR(mcu-req-intf-blocked, 0x16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) XGENE_PMU_EVENT_ATTR(mcb-mcu-rd-intf-stall, 0x17),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) XGENE_PMU_EVENT_ATTR(csw-rd-intf-blocked, 0x18),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) XGENE_PMU_EVENT_ATTR(csw-local-ack-intf-blocked, 0x19),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) XGENE_PMU_EVENT_ATTR(mcu-req-table-full, 0x1a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) XGENE_PMU_EVENT_ATTR(mcu-stat-table-full, 0x1b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) XGENE_PMU_EVENT_ATTR(mcu-wr-table-full, 0x1c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) XGENE_PMU_EVENT_ATTR(mcu-rdreceipt-resp, 0x1d),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) XGENE_PMU_EVENT_ATTR(mcu-wrcomplete-resp, 0x1e),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) XGENE_PMU_EVENT_ATTR(mcu-retryack-resp, 0x1f),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) XGENE_PMU_EVENT_ATTR(mcu-pcrdgrant-resp, 0x20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) XGENE_PMU_EVENT_ATTR(mcu-req-from-lastload, 0x21),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) XGENE_PMU_EVENT_ATTR(mcu-req-from-bypass, 0x22),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) XGENE_PMU_EVENT_ATTR(volt-droop-detect, 0x23),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) static struct attribute *mc_pmu_v3_events_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) XGENE_PMU_EVENT_ATTR(act-sent, 0x01),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) XGENE_PMU_EVENT_ATTR(pre-sent, 0x02),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) XGENE_PMU_EVENT_ATTR(rd-sent, 0x03),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) XGENE_PMU_EVENT_ATTR(rda-sent, 0x04),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) XGENE_PMU_EVENT_ATTR(wr-sent, 0x05),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) XGENE_PMU_EVENT_ATTR(wra-sent, 0x06),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) XGENE_PMU_EVENT_ATTR(pd-entry-vld, 0x07),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) XGENE_PMU_EVENT_ATTR(sref-entry-vld, 0x08),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) XGENE_PMU_EVENT_ATTR(prea-sent, 0x09),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) XGENE_PMU_EVENT_ATTR(ref-sent, 0x0a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) XGENE_PMU_EVENT_ATTR(rd-rda-sent, 0x0b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) XGENE_PMU_EVENT_ATTR(wr-wra-sent, 0x0c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) XGENE_PMU_EVENT_ATTR(raw-hazard, 0x0d),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) XGENE_PMU_EVENT_ATTR(war-hazard, 0x0e),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) XGENE_PMU_EVENT_ATTR(waw-hazard, 0x0f),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) XGENE_PMU_EVENT_ATTR(rar-hazard, 0x10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) XGENE_PMU_EVENT_ATTR(raw-war-waw-hazard, 0x11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) XGENE_PMU_EVENT_ATTR(hprd-lprd-wr-req-vld, 0x12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) XGENE_PMU_EVENT_ATTR(lprd-req-vld, 0x13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) XGENE_PMU_EVENT_ATTR(hprd-req-vld, 0x14),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) XGENE_PMU_EVENT_ATTR(hprd-lprd-req-vld, 0x15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) XGENE_PMU_EVENT_ATTR(wr-req-vld, 0x16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) XGENE_PMU_EVENT_ATTR(partial-wr-req-vld, 0x17),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) XGENE_PMU_EVENT_ATTR(rd-retry, 0x18),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) XGENE_PMU_EVENT_ATTR(wr-retry, 0x19),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) XGENE_PMU_EVENT_ATTR(retry-gnt, 0x1a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) XGENE_PMU_EVENT_ATTR(rank-change, 0x1b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) XGENE_PMU_EVENT_ATTR(dir-change, 0x1c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) XGENE_PMU_EVENT_ATTR(rank-dir-change, 0x1d),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) XGENE_PMU_EVENT_ATTR(rank-active, 0x1e),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) XGENE_PMU_EVENT_ATTR(rank-idle, 0x1f),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) XGENE_PMU_EVENT_ATTR(rank-pd, 0x20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) XGENE_PMU_EVENT_ATTR(rank-sref, 0x21),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) XGENE_PMU_EVENT_ATTR(queue-fill-gt-thresh, 0x22),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) XGENE_PMU_EVENT_ATTR(queue-rds-gt-thresh, 0x23),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) XGENE_PMU_EVENT_ATTR(queue-wrs-gt-thresh, 0x24),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) XGENE_PMU_EVENT_ATTR(phy-updt-complt, 0x25),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) XGENE_PMU_EVENT_ATTR(tz-fail, 0x26),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) XGENE_PMU_EVENT_ATTR(dram-errc, 0x27),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) XGENE_PMU_EVENT_ATTR(dram-errd, 0x28),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) XGENE_PMU_EVENT_ATTR(rd-enq, 0x29),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) XGENE_PMU_EVENT_ATTR(wr-enq, 0x2a),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) XGENE_PMU_EVENT_ATTR(tmac-limit-reached, 0x2b),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) XGENE_PMU_EVENT_ATTR(tmaw-tracker-full, 0x2c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) static const struct attribute_group l3c_pmu_v3_events_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) .name = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) .attrs = l3c_pmu_v3_events_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) static const struct attribute_group iob_fast_pmu_v3_events_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) .name = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) .attrs = iob_fast_pmu_v3_events_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) static const struct attribute_group iob_slow_pmu_v3_events_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) .name = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) .attrs = iob_slow_pmu_v3_events_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) static const struct attribute_group mcb_pmu_v3_events_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) .name = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) .attrs = mcb_pmu_v3_events_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) static const struct attribute_group mc_pmu_v3_events_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) .name = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) .attrs = mc_pmu_v3_events_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) * sysfs cpumask attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) static ssize_t xgene_pmu_cpumask_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) struct xgene_pmu_dev *pmu_dev = to_pmu_dev(dev_get_drvdata(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) static DEVICE_ATTR(cpumask, S_IRUGO, xgene_pmu_cpumask_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) static struct attribute *xgene_pmu_cpumask_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) &dev_attr_cpumask.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) static const struct attribute_group pmu_cpumask_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) .attrs = xgene_pmu_cpumask_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) * Per PMU device attribute groups of PMU v1 and v2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) static const struct attribute_group *l3c_pmu_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) &l3c_pmu_format_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) &pmu_cpumask_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) &l3c_pmu_events_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) static const struct attribute_group *iob_pmu_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) &iob_pmu_format_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) &pmu_cpumask_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) &iob_pmu_events_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) static const struct attribute_group *mcb_pmu_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) &mcb_pmu_format_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) &pmu_cpumask_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) &mcb_pmu_events_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) static const struct attribute_group *mc_pmu_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) &mc_pmu_format_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) &pmu_cpumask_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) &mc_pmu_events_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) * Per PMU device attribute groups of PMU v3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) static const struct attribute_group *l3c_pmu_v3_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) &l3c_pmu_v3_format_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) &pmu_cpumask_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) &l3c_pmu_v3_events_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) static const struct attribute_group *iob_fast_pmu_v3_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) &iob_pmu_v3_format_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) &pmu_cpumask_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) &iob_fast_pmu_v3_events_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) static const struct attribute_group *iob_slow_pmu_v3_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) &iob_slow_pmu_v3_format_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) &pmu_cpumask_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) &iob_slow_pmu_v3_events_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) static const struct attribute_group *mcb_pmu_v3_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) &mcb_pmu_v3_format_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) &pmu_cpumask_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) &mcb_pmu_v3_events_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) static const struct attribute_group *mc_pmu_v3_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) &mc_pmu_v3_format_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) &pmu_cpumask_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) &mc_pmu_v3_events_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) static int get_next_avail_cntr(struct xgene_pmu_dev *pmu_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) int cntr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) cntr = find_first_zero_bit(pmu_dev->cntr_assign_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) pmu_dev->max_counters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) if (cntr == pmu_dev->max_counters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) set_bit(cntr, pmu_dev->cntr_assign_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) return cntr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) static void clear_avail_cntr(struct xgene_pmu_dev *pmu_dev, int cntr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) clear_bit(cntr, pmu_dev->cntr_assign_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) static inline void xgene_pmu_mask_int(struct xgene_pmu *xgene_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) writel(PCPPMU_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) static inline void xgene_pmu_v3_mask_int(struct xgene_pmu *xgene_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) writel(PCPPMU_V3_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) static inline void xgene_pmu_unmask_int(struct xgene_pmu *xgene_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) writel(PCPPMU_INTCLRMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) static inline void xgene_pmu_v3_unmask_int(struct xgene_pmu *xgene_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) writel(PCPPMU_V3_INTCLRMASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) static inline u64 xgene_pmu_read_counter32(struct xgene_pmu_dev *pmu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) return readl(pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) static inline u64 xgene_pmu_read_counter64(struct xgene_pmu_dev *pmu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) u32 lo, hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * v3 has 64-bit counter registers composed by 2 32-bit registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) * This can be a problem if the counter increases and carries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) * out of bit [31] between 2 reads. The extra reads would help
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) * to prevent this issue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) hi = xgene_pmu_read_counter32(pmu_dev, 2 * idx + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) lo = xgene_pmu_read_counter32(pmu_dev, 2 * idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) } while (hi != xgene_pmu_read_counter32(pmu_dev, 2 * idx + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) return (((u64)hi << 32) | lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) xgene_pmu_write_counter32(struct xgene_pmu_dev *pmu_dev, int idx, u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) writel(val, pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) xgene_pmu_write_counter64(struct xgene_pmu_dev *pmu_dev, int idx, u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) u32 cnt_lo, cnt_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) cnt_hi = upper_32_bits(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) cnt_lo = lower_32_bits(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) /* v3 has 64-bit counter registers composed by 2 32-bit registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) xgene_pmu_write_counter32(pmu_dev, 2 * idx, cnt_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) xgene_pmu_write_counter32(pmu_dev, 2 * idx + 1, cnt_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) xgene_pmu_write_evttype(struct xgene_pmu_dev *pmu_dev, int idx, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) writel(val, pmu_dev->inf->csr + PMU_PMEVTYPER0 + (4 * idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) xgene_pmu_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) writel(val, pmu_dev->inf->csr + PMU_PMAMR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) xgene_pmu_v3_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) xgene_pmu_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) writel(val, pmu_dev->inf->csr + PMU_PMAMR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) xgene_pmu_v3_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) xgene_pmu_enable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) val = readl(pmu_dev->inf->csr + PMU_PMCNTENSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) val |= 1 << idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) writel(val, pmu_dev->inf->csr + PMU_PMCNTENSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) xgene_pmu_disable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) val = readl(pmu_dev->inf->csr + PMU_PMCNTENCLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) val |= 1 << idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) writel(val, pmu_dev->inf->csr + PMU_PMCNTENCLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) xgene_pmu_enable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) val = readl(pmu_dev->inf->csr + PMU_PMINTENSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) val |= 1 << idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) writel(val, pmu_dev->inf->csr + PMU_PMINTENSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) xgene_pmu_disable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) val = readl(pmu_dev->inf->csr + PMU_PMINTENCLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) val |= 1 << idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) writel(val, pmu_dev->inf->csr + PMU_PMINTENCLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) static inline void xgene_pmu_reset_counters(struct xgene_pmu_dev *pmu_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) val = readl(pmu_dev->inf->csr + PMU_PMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) val |= PMU_PMCR_P;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) writel(val, pmu_dev->inf->csr + PMU_PMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) static inline void xgene_pmu_start_counters(struct xgene_pmu_dev *pmu_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) val = readl(pmu_dev->inf->csr + PMU_PMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) val |= PMU_PMCR_E;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) writel(val, pmu_dev->inf->csr + PMU_PMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) static inline void xgene_pmu_stop_counters(struct xgene_pmu_dev *pmu_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) val = readl(pmu_dev->inf->csr + PMU_PMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) val &= ~PMU_PMCR_E;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) writel(val, pmu_dev->inf->csr + PMU_PMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) static void xgene_perf_pmu_enable(struct pmu *pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) struct xgene_pmu *xgene_pmu = pmu_dev->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) int enabled = bitmap_weight(pmu_dev->cntr_assign_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) pmu_dev->max_counters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (!enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) xgene_pmu->ops->start_counters(pmu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) static void xgene_perf_pmu_disable(struct pmu *pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) struct xgene_pmu *xgene_pmu = pmu_dev->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) xgene_pmu->ops->stop_counters(pmu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) static int xgene_perf_event_init(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) struct hw_perf_event *hw = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct perf_event *sibling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /* Test the event attr type check for PMU enumeration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (event->attr.type != event->pmu->type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * SOC PMU counters are shared across all cores.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * Therefore, it does not support per-process mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * Also, it does not support event sampling mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (event->cpu < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * Many perf core operations (eg. events rotation) operate on a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * single CPU context. This is obvious for CPU PMUs, where one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * expects the same sets of events being observed on all CPUs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * but can lead to issues for off-core PMUs, where each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * event could be theoretically assigned to a different CPU. To
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * mitigate this, we enforce CPU assignment to one, selected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * processor (the one described in the "cpumask" attribute).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) event->cpu = cpumask_first(&pmu_dev->parent->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) hw->config = event->attr.config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * Each bit of the config1 field represents an agent from which the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * request of the event come. The event is counted only if it's caused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * by a request of an agent has the bit cleared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * By default, the event is counted for all agents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) hw->config_base = event->attr.config1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * We must NOT create groups containing mixed PMUs, although software
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * events are acceptable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (event->group_leader->pmu != event->pmu &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) !is_software_event(event->group_leader))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) for_each_sibling_event(sibling, event->group_leader) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (sibling->pmu != event->pmu &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) !is_software_event(sibling))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static void xgene_perf_enable_event(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct xgene_pmu *xgene_pmu = pmu_dev->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) xgene_pmu->ops->write_evttype(pmu_dev, GET_CNTR(event),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) GET_EVENTID(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) xgene_pmu->ops->write_agentmsk(pmu_dev, ~((u32)GET_AGENTID(event)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (pmu_dev->inf->type == PMU_TYPE_IOB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) xgene_pmu->ops->write_agent1msk(pmu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) ~((u32)GET_AGENT1ID(event)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) xgene_pmu->ops->enable_counter(pmu_dev, GET_CNTR(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) xgene_pmu->ops->enable_counter_int(pmu_dev, GET_CNTR(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) static void xgene_perf_disable_event(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) struct xgene_pmu *xgene_pmu = pmu_dev->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) xgene_pmu->ops->disable_counter(pmu_dev, GET_CNTR(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) xgene_pmu->ops->disable_counter_int(pmu_dev, GET_CNTR(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) static void xgene_perf_event_set_period(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) struct xgene_pmu *xgene_pmu = pmu_dev->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) struct hw_perf_event *hw = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * For 32 bit counter, it has a period of 2^32. To account for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * possibility of extreme interrupt latency we program for a period of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * half that. Hopefully, we can handle the interrupt before another 2^31
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * events occur and the counter overtakes its previous value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * For 64 bit counter, we don't expect it overflow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) u64 val = 1ULL << 31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) local64_set(&hw->prev_count, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) xgene_pmu->ops->write_counter(pmu_dev, hw->idx, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) static void xgene_perf_event_update(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) struct xgene_pmu *xgene_pmu = pmu_dev->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct hw_perf_event *hw = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) u64 delta, prev_raw_count, new_raw_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) prev_raw_count = local64_read(&hw->prev_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) new_raw_count = xgene_pmu->ops->read_counter(pmu_dev, GET_CNTR(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (local64_cmpxchg(&hw->prev_count, prev_raw_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) new_raw_count) != prev_raw_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) delta = (new_raw_count - prev_raw_count) & pmu_dev->max_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) local64_add(delta, &event->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) static void xgene_perf_read(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) xgene_perf_event_update(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) static void xgene_perf_start(struct perf_event *event, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct xgene_pmu *xgene_pmu = pmu_dev->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct hw_perf_event *hw = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (WARN_ON_ONCE(!(hw->state & PERF_HES_STOPPED)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) WARN_ON_ONCE(!(hw->state & PERF_HES_UPTODATE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) hw->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) xgene_perf_event_set_period(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (flags & PERF_EF_RELOAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) u64 prev_raw_count = local64_read(&hw->prev_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) xgene_pmu->ops->write_counter(pmu_dev, GET_CNTR(event),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) prev_raw_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) xgene_perf_enable_event(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) perf_event_update_userpage(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) static void xgene_perf_stop(struct perf_event *event, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) struct hw_perf_event *hw = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (hw->state & PERF_HES_UPTODATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) xgene_perf_disable_event(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) WARN_ON_ONCE(hw->state & PERF_HES_STOPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) hw->state |= PERF_HES_STOPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (hw->state & PERF_HES_UPTODATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) xgene_perf_read(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) hw->state |= PERF_HES_UPTODATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) static int xgene_perf_add(struct perf_event *event, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) struct hw_perf_event *hw = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) hw->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) /* Allocate an event counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) hw->idx = get_next_avail_cntr(pmu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (hw->idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) /* Update counter event pointer for Interrupt handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) pmu_dev->pmu_counter_event[hw->idx] = event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (flags & PERF_EF_START)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) xgene_perf_start(event, PERF_EF_RELOAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) static void xgene_perf_del(struct perf_event *event, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) struct hw_perf_event *hw = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) xgene_perf_stop(event, PERF_EF_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /* clear the assigned counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) clear_avail_cntr(pmu_dev, GET_CNTR(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) perf_event_update_userpage(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) pmu_dev->pmu_counter_event[hw->idx] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) struct xgene_pmu *xgene_pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (pmu_dev->parent->version == PCP_PMU_V3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) pmu_dev->max_period = PMU_V3_CNT_MAX_PERIOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) pmu_dev->max_period = PMU_CNT_MAX_PERIOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /* First version PMU supports only single event counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) xgene_pmu = pmu_dev->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (xgene_pmu->version == PCP_PMU_V1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) pmu_dev->max_counters = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) pmu_dev->max_counters = PMU_MAX_COUNTERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) /* Perf driver registration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) pmu_dev->pmu = (struct pmu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) .attr_groups = pmu_dev->attr_groups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) .task_ctx_nr = perf_invalid_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) .pmu_enable = xgene_perf_pmu_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) .pmu_disable = xgene_perf_pmu_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) .event_init = xgene_perf_event_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) .add = xgene_perf_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) .del = xgene_perf_del,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) .start = xgene_perf_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) .stop = xgene_perf_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) .read = xgene_perf_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /* Hardware counter init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) xgene_pmu->ops->stop_counters(pmu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) xgene_pmu->ops->reset_counters(pmu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) return perf_pmu_register(&pmu_dev->pmu, name, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) xgene_pmu_dev_add(struct xgene_pmu *xgene_pmu, struct xgene_pmu_dev_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) struct device *dev = xgene_pmu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) struct xgene_pmu_dev *pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (!pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) pmu->parent = xgene_pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) pmu->inf = &ctx->inf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) ctx->pmu_dev = pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) switch (pmu->inf->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) case PMU_TYPE_L3C:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (!(xgene_pmu->l3c_active_mask & pmu->inf->enable_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (xgene_pmu->version == PCP_PMU_V3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) pmu->attr_groups = l3c_pmu_v3_attr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) pmu->attr_groups = l3c_pmu_attr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) case PMU_TYPE_IOB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (xgene_pmu->version == PCP_PMU_V3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) pmu->attr_groups = iob_fast_pmu_v3_attr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) pmu->attr_groups = iob_pmu_attr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) case PMU_TYPE_IOB_SLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (xgene_pmu->version == PCP_PMU_V3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) pmu->attr_groups = iob_slow_pmu_v3_attr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) case PMU_TYPE_MCB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (!(xgene_pmu->mcb_active_mask & pmu->inf->enable_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (xgene_pmu->version == PCP_PMU_V3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) pmu->attr_groups = mcb_pmu_v3_attr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) pmu->attr_groups = mcb_pmu_attr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) case PMU_TYPE_MC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (!(xgene_pmu->mc_active_mask & pmu->inf->enable_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (xgene_pmu->version == PCP_PMU_V3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) pmu->attr_groups = mc_pmu_v3_attr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) pmu->attr_groups = mc_pmu_attr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (xgene_init_perf(pmu, ctx->name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) dev_err(dev, "%s PMU: Failed to init perf driver\n", ctx->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) dev_info(dev, "%s PMU registered\n", ctx->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) static void _xgene_pmu_isr(int irq, struct xgene_pmu_dev *pmu_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) struct xgene_pmu *xgene_pmu = pmu_dev->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) void __iomem *csr = pmu_dev->inf->csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) u32 pmovsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) xgene_pmu->ops->stop_counters(pmu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (xgene_pmu->version == PCP_PMU_V3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) pmovsr = readl(csr + PMU_PMOVSSET) & PMU_OVERFLOW_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) pmovsr = readl(csr + PMU_PMOVSR) & PMU_OVERFLOW_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (!pmovsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /* Clear interrupt flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (xgene_pmu->version == PCP_PMU_V1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) writel(0x0, csr + PMU_PMOVSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) else if (xgene_pmu->version == PCP_PMU_V2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) writel(pmovsr, csr + PMU_PMOVSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) writel(pmovsr, csr + PMU_PMOVSCLR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) for (idx = 0; idx < PMU_MAX_COUNTERS; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) struct perf_event *event = pmu_dev->pmu_counter_event[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) int overflowed = pmovsr & BIT(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) /* Ignore if we don't have an event. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (!event || !overflowed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) xgene_perf_event_update(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) xgene_perf_event_set_period(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) xgene_pmu->ops->start_counters(pmu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) static irqreturn_t xgene_pmu_isr(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) u32 intr_mcu, intr_mcb, intr_l3c, intr_iob;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) struct xgene_pmu_dev_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) struct xgene_pmu *xgene_pmu = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) raw_spin_lock_irqsave(&xgene_pmu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) /* Get Interrupt PMU source */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) val = readl(xgene_pmu->pcppmu_csr + PCPPMU_INTSTATUS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (xgene_pmu->version == PCP_PMU_V3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) intr_mcu = PCPPMU_V3_INT_MCU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) intr_mcb = PCPPMU_V3_INT_MCB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) intr_l3c = PCPPMU_V3_INT_L3C;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) intr_iob = PCPPMU_V3_INT_IOB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) intr_mcu = PCPPMU_INT_MCU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) intr_mcb = PCPPMU_INT_MCB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) intr_l3c = PCPPMU_INT_L3C;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) intr_iob = PCPPMU_INT_IOB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) if (val & intr_mcu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) _xgene_pmu_isr(irq, ctx->pmu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (val & intr_mcb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) _xgene_pmu_isr(irq, ctx->pmu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (val & intr_l3c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) _xgene_pmu_isr(irq, ctx->pmu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (val & intr_iob) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) _xgene_pmu_isr(irq, ctx->pmu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) raw_spin_unlock_irqrestore(&xgene_pmu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) static int acpi_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) void __iomem *csw_csr, *mcba_csr, *mcbb_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) unsigned int reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) csw_csr = devm_platform_ioremap_resource(pdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (IS_ERR(csw_csr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) return PTR_ERR(csw_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) mcba_csr = devm_platform_ioremap_resource(pdev, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (IS_ERR(mcba_csr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) dev_err(&pdev->dev, "ioremap failed for MCBA CSR resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return PTR_ERR(mcba_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) mcbb_csr = devm_platform_ioremap_resource(pdev, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (IS_ERR(mcbb_csr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) dev_err(&pdev->dev, "ioremap failed for MCBB CSR resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return PTR_ERR(mcbb_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) xgene_pmu->l3c_active_mask = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) reg = readl(csw_csr + CSW_CSWCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (reg & CSW_CSWCR_DUALMCB_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /* Dual MCB active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) xgene_pmu->mcb_active_mask = 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) /* Probe all active MC(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) reg = readl(mcbb_csr + CSW_CSWCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) xgene_pmu->mc_active_mask =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) /* Single MCB active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) xgene_pmu->mcb_active_mask = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) /* Probe all active MC(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) reg = readl(mcba_csr + CSW_CSWCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) xgene_pmu->mc_active_mask =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) static int acpi_pmu_v3_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) void __iomem *csw_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) unsigned int reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) u32 mcb0routing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) u32 mcb1routing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) csw_csr = devm_platform_ioremap_resource(pdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (IS_ERR(csw_csr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return PTR_ERR(csw_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) reg = readl(csw_csr + CSW_CSWCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) mcb0routing = CSW_CSWCR_MCB0_ROUTING(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) mcb1routing = CSW_CSWCR_MCB1_ROUTING(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (reg & CSW_CSWCR_DUALMCB_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) /* Dual MCB active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) xgene_pmu->mcb_active_mask = 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) /* Probe all active L3C(s), maximum is 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) xgene_pmu->l3c_active_mask = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) /* Probe all active MC(s), maximum is 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if ((mcb0routing == 0x2) && (mcb1routing == 0x2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) xgene_pmu->mc_active_mask = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) else if ((mcb0routing == 0x1) && (mcb1routing == 0x1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) xgene_pmu->mc_active_mask = 0x33;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) xgene_pmu->mc_active_mask = 0x11;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) /* Single MCB active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) xgene_pmu->mcb_active_mask = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /* Probe all active L3C(s), maximum is 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) xgene_pmu->l3c_active_mask = 0x0F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) /* Probe all active MC(s), maximum is 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (mcb0routing == 0x2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) xgene_pmu->mc_active_mask = 0x0F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) else if (mcb0routing == 0x1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) xgene_pmu->mc_active_mask = 0x03;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) xgene_pmu->mc_active_mask = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) static int fdt_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) struct regmap *csw_map, *mcba_map, *mcbb_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) unsigned int reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) csw_map = syscon_regmap_lookup_by_phandle(np, "regmap-csw");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (IS_ERR(csw_map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) dev_err(&pdev->dev, "unable to get syscon regmap csw\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) return PTR_ERR(csw_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) mcba_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcba");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (IS_ERR(mcba_map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) dev_err(&pdev->dev, "unable to get syscon regmap mcba\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) return PTR_ERR(mcba_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) mcbb_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcbb");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) if (IS_ERR(mcbb_map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) dev_err(&pdev->dev, "unable to get syscon regmap mcbb\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) return PTR_ERR(mcbb_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) xgene_pmu->l3c_active_mask = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) if (regmap_read(csw_map, CSW_CSWCR, ®))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (reg & CSW_CSWCR_DUALMCB_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) /* Dual MCB active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) xgene_pmu->mcb_active_mask = 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) /* Probe all active MC(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (regmap_read(mcbb_map, MCBADDRMR, ®))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) xgene_pmu->mc_active_mask =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) /* Single MCB active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) xgene_pmu->mcb_active_mask = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) /* Probe all active MC(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (regmap_read(mcba_map, MCBADDRMR, ®))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) xgene_pmu->mc_active_mask =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) static int xgene_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (has_acpi_companion(&pdev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) if (xgene_pmu->version == PCP_PMU_V3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) return acpi_pmu_v3_probe_active_mcb_mcu_l3c(xgene_pmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) return acpi_pmu_probe_active_mcb_mcu_l3c(xgene_pmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) return fdt_pmu_probe_active_mcb_mcu_l3c(xgene_pmu, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) case PMU_TYPE_L3C:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) return devm_kasprintf(dev, GFP_KERNEL, "l3c%d", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) case PMU_TYPE_IOB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) case PMU_TYPE_IOB_SLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) return devm_kasprintf(dev, GFP_KERNEL, "iob_slow%d", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) case PMU_TYPE_MCB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) case PMU_TYPE_MC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) return devm_kasprintf(dev, GFP_KERNEL, "mc%d", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) return devm_kasprintf(dev, GFP_KERNEL, "unknown");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) #if defined(CONFIG_ACPI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) static struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) struct acpi_device *adev, u32 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) struct device *dev = xgene_pmu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) struct list_head resource_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) struct xgene_pmu_dev_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) const union acpi_object *obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) struct hw_pmu_info *inf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) void __iomem *dev_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) struct resource_entry *rentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) int enable_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) INIT_LIST_HEAD(&resource_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (rc <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) dev_err(dev, "PMU type %d: No resources found\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) list_for_each_entry(rentry, &resource_list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (resource_type(rentry->res) == IORESOURCE_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) res = *rentry->res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) rentry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) acpi_dev_free_resource_list(&resource_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) if (rentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) dev_err(dev, "PMU type %d: No memory resource found\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) dev_csr = devm_ioremap_resource(dev, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (IS_ERR(dev_csr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) dev_err(dev, "PMU type %d: Fail to map resource\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) /* A PMU device node without enable-bit-index is always enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) rc = acpi_dev_get_property(adev, "enable-bit-index",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) ACPI_TYPE_INTEGER, &obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) enable_bit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) enable_bit = (int) obj->integer.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (!ctx->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) dev_err(dev, "PMU type %d: Fail to get device name\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) inf = &ctx->inf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) inf->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) inf->csr = dev_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) inf->enable_mask = 1 << enable_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) return ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) static const struct acpi_device_id xgene_pmu_acpi_type_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {"APMC0D5D", PMU_TYPE_L3C},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {"APMC0D5E", PMU_TYPE_IOB},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) {"APMC0D5F", PMU_TYPE_MCB},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) {"APMC0D60", PMU_TYPE_MC},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) {"APMC0D84", PMU_TYPE_L3C},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) {"APMC0D85", PMU_TYPE_IOB},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) {"APMC0D86", PMU_TYPE_IOB_SLOW},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {"APMC0D87", PMU_TYPE_MCB},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) {"APMC0D88", PMU_TYPE_MC},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) static const struct acpi_device_id *xgene_pmu_acpi_match_type(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) const struct acpi_device_id *ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) struct acpi_device *adev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) const struct acpi_device_id *match_id = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) const struct acpi_device_id *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) for (id = ids; id->id[0] || id->cls; id++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (!acpi_match_device_ids(adev, id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) match_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) else if (match_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) return match_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) static acpi_status acpi_pmu_dev_add(acpi_handle handle, u32 level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) void *data, void **return_value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) const struct acpi_device_id *acpi_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) struct xgene_pmu *xgene_pmu = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) struct xgene_pmu_dev_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) struct acpi_device *adev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) if (acpi_bus_get_device(handle, &adev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) return AE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (acpi_bus_get_status(adev) || !adev->status.present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) return AE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) acpi_id = xgene_pmu_acpi_match_type(xgene_pmu_acpi_type_match, adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (!acpi_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) return AE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, (u32)acpi_id->driver_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) return AE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) /* Can't add the PMU device, skip it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) devm_kfree(xgene_pmu->dev, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) return AE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) switch (ctx->inf.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) case PMU_TYPE_L3C:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) list_add(&ctx->next, &xgene_pmu->l3cpmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) case PMU_TYPE_IOB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) list_add(&ctx->next, &xgene_pmu->iobpmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) case PMU_TYPE_IOB_SLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) list_add(&ctx->next, &xgene_pmu->iobpmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) case PMU_TYPE_MCB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) list_add(&ctx->next, &xgene_pmu->mcbpmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) case PMU_TYPE_MC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) list_add(&ctx->next, &xgene_pmu->mcpmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) return AE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) struct device *dev = xgene_pmu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) acpi_handle handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) acpi_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) handle = ACPI_HANDLE(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (!handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) acpi_pmu_dev_add, NULL, xgene_pmu, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (ACPI_FAILURE(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) dev_err(dev, "failed to probe PMU devices\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) static struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) xgene_pmu_dev_ctx *fdt_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) struct device_node *np, u32 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) struct device *dev = xgene_pmu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) struct xgene_pmu_dev_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) struct hw_pmu_info *inf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) void __iomem *dev_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) int enable_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (of_address_to_resource(np, 0, &res) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) dev_err(dev, "PMU type %d: No resource address found\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) dev_csr = devm_ioremap_resource(dev, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if (IS_ERR(dev_csr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) dev_err(dev, "PMU type %d: Fail to map resource\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) /* A PMU device node without enable-bit-index is always enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) if (of_property_read_u32(np, "enable-bit-index", &enable_bit))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) enable_bit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (!ctx->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) dev_err(dev, "PMU type %d: Fail to get device name\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) inf = &ctx->inf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) inf->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) inf->csr = dev_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) inf->enable_mask = 1 << enable_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) return ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) static int fdt_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) struct xgene_pmu_dev_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) for_each_child_of_node(pdev->dev.of_node, np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) if (!of_device_is_available(np))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (of_device_is_compatible(np, "apm,xgene-pmu-l3c"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_L3C);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) else if (of_device_is_compatible(np, "apm,xgene-pmu-iob"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_IOB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) else if (of_device_is_compatible(np, "apm,xgene-pmu-mcb"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MCB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) else if (of_device_is_compatible(np, "apm,xgene-pmu-mc"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) /* Can't add the PMU device, skip it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) devm_kfree(xgene_pmu->dev, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) switch (ctx->inf.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) case PMU_TYPE_L3C:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) list_add(&ctx->next, &xgene_pmu->l3cpmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) case PMU_TYPE_IOB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) list_add(&ctx->next, &xgene_pmu->iobpmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) case PMU_TYPE_IOB_SLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) list_add(&ctx->next, &xgene_pmu->iobpmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) case PMU_TYPE_MCB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) list_add(&ctx->next, &xgene_pmu->mcbpmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) case PMU_TYPE_MC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) list_add(&ctx->next, &xgene_pmu->mcpmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) static int xgene_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) if (has_acpi_companion(&pdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) return acpi_pmu_probe_pmu_dev(xgene_pmu, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) return fdt_pmu_probe_pmu_dev(xgene_pmu, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
/* OF match data for the original (v1) X-Gene PMU. */
static const struct xgene_pmu_data xgene_pmu_data = {
	.id = PCP_PMU_V1,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
/* OF match data for the v2 X-Gene PMU. */
static const struct xgene_pmu_data xgene_pmu_v2_data = {
	.id = PCP_PMU_V2,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
/*
 * Hardware-access callbacks for PMU v1/v2: uses the 32-bit counter
 * accessors and the non-v3 interrupt-mask and agent-mask helpers.
 * Selected in xgene_pmu_probe() for any version other than PCP_PMU_V3.
 */
static const struct xgene_pmu_ops xgene_pmu_ops = {
	.mask_int = xgene_pmu_mask_int,
	.unmask_int = xgene_pmu_unmask_int,
	.read_counter = xgene_pmu_read_counter32,
	.write_counter = xgene_pmu_write_counter32,
	.write_evttype = xgene_pmu_write_evttype,
	.write_agentmsk = xgene_pmu_write_agentmsk,
	.write_agent1msk = xgene_pmu_write_agent1msk,
	.enable_counter = xgene_pmu_enable_counter,
	.disable_counter = xgene_pmu_disable_counter,
	.enable_counter_int = xgene_pmu_enable_counter_int,
	.disable_counter_int = xgene_pmu_disable_counter_int,
	.reset_counters = xgene_pmu_reset_counters,
	.start_counters = xgene_pmu_start_counters,
	.stop_counters = xgene_pmu_stop_counters,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
/*
 * Hardware-access callbacks for PMU v3: uses the 64-bit counter
 * accessors and the v3-specific interrupt-mask and agent-mask helpers;
 * counter enable/start/stop paths are shared with v1/v2.
 */
static const struct xgene_pmu_ops xgene_pmu_v3_ops = {
	.mask_int = xgene_pmu_v3_mask_int,
	.unmask_int = xgene_pmu_v3_unmask_int,
	.read_counter = xgene_pmu_read_counter64,
	.write_counter = xgene_pmu_write_counter64,
	.write_evttype = xgene_pmu_write_evttype,
	.write_agentmsk = xgene_pmu_v3_write_agentmsk,
	.write_agent1msk = xgene_pmu_v3_write_agent1msk,
	.enable_counter = xgene_pmu_enable_counter,
	.disable_counter = xgene_pmu_disable_counter,
	.enable_counter_int = xgene_pmu_enable_counter_int,
	.disable_counter_int = xgene_pmu_disable_counter_int,
	.reset_counters = xgene_pmu_reset_counters,
	.start_counters = xgene_pmu_start_counters,
	.stop_counters = xgene_pmu_stop_counters,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
/*
 * OF match table; .data points at the per-revision xgene_pmu_data whose
 * id field selects the ops table in xgene_pmu_probe().
 */
static const struct of_device_id xgene_pmu_of_match[] = {
	{ .compatible = "apm,xgene-pmu", .data = &xgene_pmu_data },
	{ .compatible = "apm,xgene-pmu-v2", .data = &xgene_pmu_v2_data },
	{},
};
MODULE_DEVICE_TABLE(of, xgene_pmu_of_match);
#ifdef CONFIG_ACPI
/*
 * ACPI match table; driver_data carries the PMU revision (PCP_PMU_Vx)
 * and is read back via acpi_match_device() in xgene_pmu_probe().
 */
static const struct acpi_device_id xgene_pmu_acpi_match[] = {
	{"APMC0D5B", PCP_PMU_V1},
	{"APMC0D5C", PCP_PMU_V2},
	{"APMC0D83", PCP_PMU_V3},
	{},
};
MODULE_DEVICE_TABLE(acpi, xgene_pmu_acpi_match);
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) static int xgene_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) if (cpumask_empty(&xgene_pmu->cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) cpumask_set_cpu(cpu, &xgene_pmu->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) /* Overflow interrupt also should use the same CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) static int xgene_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) struct xgene_pmu_dev_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) unsigned int target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) if (!cpumask_test_and_clear_cpu(cpu, &xgene_pmu->cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) target = cpumask_any_but(cpu_online_mask, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) if (target >= nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) cpumask_set_cpu(target, &xgene_pmu->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) /* Overflow interrupt also should use the same CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) static int xgene_pmu_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) const struct xgene_pmu_data *dev_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) const struct of_device_id *of_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) struct xgene_pmu *xgene_pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) int irq, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) int version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) /* Install a hook to update the reader CPU in case it goes offline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) rc = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) "CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) xgene_pmu_online_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) xgene_pmu_offline_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) xgene_pmu = devm_kzalloc(&pdev->dev, sizeof(*xgene_pmu), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) if (!xgene_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) xgene_pmu->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) platform_set_drvdata(pdev, xgene_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) version = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) of_id = of_match_device(xgene_pmu_of_match, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (of_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) dev_data = (const struct xgene_pmu_data *) of_id->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) version = dev_data->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) #ifdef CONFIG_ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) if (ACPI_COMPANION(&pdev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) const struct acpi_device_id *acpi_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) acpi_id = acpi_match_device(xgene_pmu_acpi_match, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) if (acpi_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) version = (int) acpi_id->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) if (version < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) if (version == PCP_PMU_V3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) xgene_pmu->ops = &xgene_pmu_v3_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) xgene_pmu->ops = &xgene_pmu_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) INIT_LIST_HEAD(&xgene_pmu->l3cpmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) INIT_LIST_HEAD(&xgene_pmu->iobpmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) INIT_LIST_HEAD(&xgene_pmu->mcbpmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) INIT_LIST_HEAD(&xgene_pmu->mcpmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) xgene_pmu->version = version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) dev_info(&pdev->dev, "X-Gene PMU version %d\n", xgene_pmu->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) xgene_pmu->pcppmu_csr = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) if (IS_ERR(xgene_pmu->pcppmu_csr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) dev_err(&pdev->dev, "ioremap failed for PCP PMU resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) return PTR_ERR(xgene_pmu->pcppmu_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) IRQF_NOBALANCING | IRQF_NO_THREAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) dev_name(&pdev->dev), xgene_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) dev_err(&pdev->dev, "Could not request IRQ %d\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) xgene_pmu->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) raw_spin_lock_init(&xgene_pmu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) /* Check for active MCBs and MCUs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) rc = xgene_pmu_probe_active_mcb_mcu_l3c(xgene_pmu, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) dev_warn(&pdev->dev, "Unknown MCB/MCU active status\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) xgene_pmu->mcb_active_mask = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) xgene_pmu->mc_active_mask = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) /* Add this instance to the list used by the hotplug callback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) rc = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) &xgene_pmu->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) dev_err(&pdev->dev, "Error %d registering hotplug", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) /* Walk through the tree for all PMU perf devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) rc = xgene_pmu_probe_pmu_dev(xgene_pmu, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) dev_err(&pdev->dev, "No PMU perf devices found!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) goto out_unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) /* Enable interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) xgene_pmu->ops->unmask_int(xgene_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) out_unregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) &xgene_pmu->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) xgene_pmu_dev_cleanup(struct xgene_pmu *xgene_pmu, struct list_head *pmus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) struct xgene_pmu_dev_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) list_for_each_entry(ctx, pmus, next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) perf_pmu_unregister(&ctx->pmu_dev->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) static int xgene_pmu_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) struct xgene_pmu *xgene_pmu = dev_get_drvdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->l3cpmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->iobpmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcbpmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) &xgene_pmu->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
/*
 * Platform driver registration. suppress_bind_attrs disables manual
 * bind/unbind through sysfs for this driver.
 */
static struct platform_driver xgene_pmu_driver = {
	.probe = xgene_pmu_probe,
	.remove = xgene_pmu_remove,
	.driver = {
		.name		= "xgene-pmu",
		.of_match_table = xgene_pmu_of_match,
		.acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match),
		.suppress_bind_attrs = true,
	},
};

builtin_platform_driver(xgene_pmu_driver);