Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
// CCI Cache Coherent Interconnect PMU driver
// Copyright (C) 2013-2018 Arm Ltd.
// Author: Punit Agrawal <punit.agrawal@arm.com>, Suzuki Poulose <suzuki.poulose@arm.com>

#include <linux/arm-cci.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define DRIVER_NAME		"ARM-CCI PMU"

#define CCI_PMCR		0x0100
#define CCI_PID2		0x0fe8

#define CCI_PMCR_CEN		0x00000001
#define CCI_PMCR_NCNT_MASK	0x0000f800
#define CCI_PMCR_NCNT_SHIFT	11

#define CCI_PID2_REV_MASK	0xf0
#define CCI_PID2_REV_SHIFT	4

#define CCI_PMU_EVT_SEL		0x000
#define CCI_PMU_CNTR		0x004
#define CCI_PMU_CNTR_CTRL	0x008
#define CCI_PMU_OVRFLW		0x00c

#define CCI_PMU_OVRFLW_FLAG	1

#define CCI_PMU_CNTR_SIZE(model)	((model)->cntr_size)
#define CCI_PMU_CNTR_BASE(model, idx)	((idx) * CCI_PMU_CNTR_SIZE(model))
#define CCI_PMU_CNTR_MASK		((1ULL << 32) - 1)
#define CCI_PMU_CNTR_LAST(cci_pmu)	(cci_pmu->num_cntrs - 1)

#define CCI_PMU_MAX_HW_CNTRS(model) \
	((model)->num_hw_cntrs + (model)->fixed_hw_cntrs)

/* Types of interfaces that can generate events */
enum {
	CCI_IF_SLAVE,
	CCI_IF_MASTER,
#ifdef CONFIG_ARM_CCI5xx_PMU
	CCI_IF_GLOBAL,
#endif
	CCI_IF_MAX,
};

#define NUM_HW_CNTRS_CII_4XX	4
#define NUM_HW_CNTRS_CII_5XX	8
#define NUM_HW_CNTRS_MAX	NUM_HW_CNTRS_CII_5XX

#define FIXED_HW_CNTRS_CII_4XX	1
#define FIXED_HW_CNTRS_CII_5XX	0
#define FIXED_HW_CNTRS_MAX	FIXED_HW_CNTRS_CII_4XX

#define HW_CNTRS_MAX		(NUM_HW_CNTRS_MAX + FIXED_HW_CNTRS_MAX)
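
/*
 * Worked example of the totals above: a CCI-4xx exposes 4 programmable
 * counters plus 1 fixed cycle counter (5 total), while a CCI-5xx exposes
 * 8 programmable counters and no fixed ones. HW_CNTRS_MAX (8 + 1 = 9) is
 * therefore large enough to size a counter bitmap for either model.
 */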

struct event_range {
	u32 min;
	u32 max;
};

struct cci_pmu_hw_events {
	struct perf_event **events;
	unsigned long *used_mask;
	raw_spinlock_t pmu_lock;
};

struct cci_pmu;
/*
 * struct cci_pmu_model:
 * @fixed_hw_cntrs - Number of fixed event counters
 * @num_hw_cntrs - Maximum number of programmable event counters
 * @cntr_size - Size of an event counter mapping
 */
struct cci_pmu_model {
	char *name;
	u32 fixed_hw_cntrs;
	u32 num_hw_cntrs;
	u32 cntr_size;
	struct attribute **format_attrs;
	struct attribute **event_attrs;
	struct event_range event_ranges[CCI_IF_MAX];
	int (*validate_hw_event)(struct cci_pmu *, unsigned long);
	int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
	void (*write_counters)(struct cci_pmu *, unsigned long *);
};

static struct cci_pmu_model cci_pmu_models[];

struct cci_pmu {
	void __iomem *base;
	void __iomem *ctrl_base;
	struct pmu pmu;
	int cpu;
	int nr_irqs;
	int *irqs;
	unsigned long active_irqs;
	const struct cci_pmu_model *model;
	struct cci_pmu_hw_events hw_events;
	struct platform_device *plat_device;
	int num_cntrs;
	atomic_t active_events;
	struct mutex reserve_mutex;
};

#define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))

static struct cci_pmu *g_cci_pmu;

enum cci_models {
#ifdef CONFIG_ARM_CCI400_PMU
	CCI400_R0,
	CCI400_R1,
#endif
#ifdef CONFIG_ARM_CCI5xx_PMU
	CCI500_R0,
	CCI550_R0,
#endif
	CCI_MODEL_MAX
};

static void pmu_write_counters(struct cci_pmu *cci_pmu,
				 unsigned long *mask);
static ssize_t __maybe_unused cci_pmu_format_show(struct device *dev,
			struct device_attribute *attr, char *buf);
static ssize_t __maybe_unused cci_pmu_event_show(struct device *dev,
			struct device_attribute *attr, char *buf);

#define CCI_EXT_ATTR_ENTRY(_name, _func, _config)			\
	&((struct dev_ext_attribute[]) {					\
		{ __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config }	\
	})[0].attr.attr
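
/*
 * Note on the macro above: it builds an anonymous one-element
 * dev_ext_attribute array as a compound literal and yields the address of
 * the struct attribute embedded in its first element, while stashing
 * _config in the ->var pointer. The show callbacks below recover _config
 * with container_of() on the attribute.
 */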

#define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config)
#define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config)

/* CCI400 PMU Specific definitions */

#ifdef CONFIG_ARM_CCI400_PMU

/* Port ids */
#define CCI400_PORT_S0		0
#define CCI400_PORT_S1		1
#define CCI400_PORT_S2		2
#define CCI400_PORT_S3		3
#define CCI400_PORT_S4		4
#define CCI400_PORT_M0		5
#define CCI400_PORT_M1		6
#define CCI400_PORT_M2		7

#define CCI400_R1_PX		5

/*
 * Instead of an event id to monitor CCI cycles, a dedicated counter is
 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
 * make use of this event in hardware.
 */
enum cci400_perf_events {
	CCI400_PMU_CYCLES = 0xff
};

#define CCI400_PMU_CYCLE_CNTR_IDX	0
#define CCI400_PMU_CNTR0_IDX		1

/*
 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
 * ports and bits 4:0 are event codes. There are different event codes
 * associated with each port type.
 *
 * Additionally, the range of events associated with the port types changed
 * between Rev0 and Rev1.
 *
 * The constants below define the range of valid codes for each port type for
 * the different revisions and are used to validate the event to be monitored.
 */
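
/*
 * Worked example of the encoding described above: event id 0x05 decodes as
 * source = (0x05 >> 5) & 0x7 = 0 (slave port S0) and code = 0x05 & 0x1f =
 * 0x5 (si_rrq_hs_mem_barrier in the attribute tables below), which falls
 * inside the slave range on both Rev0 and Rev1.
 */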

#define CCI400_PMU_EVENT_MASK		0xffUL
#define CCI400_PMU_EVENT_SOURCE_SHIFT	5
#define CCI400_PMU_EVENT_SOURCE_MASK	0x7
#define CCI400_PMU_EVENT_CODE_SHIFT	0
#define CCI400_PMU_EVENT_CODE_MASK	0x1f
#define CCI400_PMU_EVENT_SOURCE(event) \
	((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \
			CCI400_PMU_EVENT_SOURCE_MASK)
#define CCI400_PMU_EVENT_CODE(event) \
	((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK)

#define CCI400_R0_SLAVE_PORT_MIN_EV	0x00
#define CCI400_R0_SLAVE_PORT_MAX_EV	0x13
#define CCI400_R0_MASTER_PORT_MIN_EV	0x14
#define CCI400_R0_MASTER_PORT_MAX_EV	0x1a

#define CCI400_R1_SLAVE_PORT_MIN_EV	0x00
#define CCI400_R1_SLAVE_PORT_MAX_EV	0x14
#define CCI400_R1_MASTER_PORT_MIN_EV	0x00
#define CCI400_R1_MASTER_PORT_MAX_EV	0x11

#define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \
					(unsigned long)_config)

static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
			struct device_attribute *attr, char *buf);

static struct attribute *cci400_pmu_format_attrs[] = {
	CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
	CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"),
	NULL
};
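
/*
 * The format strings above are what perf tooling reads back from sysfs
 * (format/event = "config:0-4", format/source = "config:5-7"), so events
 * can be requested symbolically instead of hand-packing config bits. A
 * hypothetical invocation, assuming this model registers the PMU under
 * the name "CCI_400":
 *
 *	perf stat -a -e CCI_400/source=0x0,event=0x5/ -- sleep 1
 *
 * which the perf core packs into config = (0x0 << 5) | 0x5 = 0x5.
 */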

static struct attribute *cci400_r0_pmu_event_attrs[] = {
	/* Slave events */
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
	/* Master events */
	CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A),
	/* Special event for cycles counter */
	CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
	NULL
};

static struct attribute *cci400_r1_pmu_event_attrs[] = {
	/* Slave events */
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14),
	/* Master events */
	CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11),
	/* Special event for cycles counter */
	CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
	NULL
};

static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
				struct dev_ext_attribute, attr);
	return snprintf(buf, PAGE_SIZE, "config=0x%lx\n", (unsigned long)eattr->var);
}
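
/*
 * Reading the "cycles" event attribute above therefore returns the literal
 * string "config=0xff\n"; perf tooling feeds that straight back as the
 * config value, steering the event to the dedicated cycle counter.
 */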

static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
				struct cci_pmu_hw_events *hw,
				unsigned long cci_event)
{
	int idx;

	/* cycles event idx is fixed */
	if (cci_event == CCI400_PMU_CYCLES) {
		if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask))
			return -EAGAIN;

		return CCI400_PMU_CYCLE_CNTR_IDX;
	}

	for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
		if (!test_and_set_bit(idx, hw->used_mask))
			return idx;

	/* No counters available */
	return -EAGAIN;
}

static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
{
	u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event);
	u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event);
	int if_type;

	if (hw_event & ~CCI400_PMU_EVENT_MASK)
		return -ENOENT;

	if (hw_event == CCI400_PMU_CYCLES)
		return hw_event;

	switch (ev_source) {
	case CCI400_PORT_S0:
	case CCI400_PORT_S1:
	case CCI400_PORT_S2:
	case CCI400_PORT_S3:
	case CCI400_PORT_S4:
		/* Slave Interface */
		if_type = CCI_IF_SLAVE;
		break;
	case CCI400_PORT_M0:
	case CCI400_PORT_M1:
	case CCI400_PORT_M2:
		/* Master Interface */
		if_type = CCI_IF_MASTER;
		break;
	default:
		return -ENOENT;
	}

	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
		ev_code <= cci_pmu->model->event_ranges[if_type].max)
		return hw_event;

	return -ENOENT;
}

static int probe_cci400_revision(struct cci_pmu *cci_pmu)
{
	int rev;
	rev = readl_relaxed(cci_pmu->ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
	rev >>= CCI_PID2_REV_SHIFT;

	if (rev < CCI400_R1_PX)
		return CCI400_R0;
	else
		return CCI400_R1;
}

static const struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
{
	if (platform_has_secure_cci_access())
		return &cci_pmu_models[probe_cci400_revision(cci_pmu)];
	return NULL;
}
#else	/* !CONFIG_ARM_CCI400_PMU */
static inline struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
{
	return NULL;
}
#endif	/* CONFIG_ARM_CCI400_PMU */

#ifdef CONFIG_ARM_CCI5xx_PMU

/*
 * CCI5xx PMU event id is a 9-bit value made of two parts.
 *	 bits [8:5] - Source for the event
 *	 bits [4:0] - Event code (specific to type of interface)
 */
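
/*
 * Worked example: config = 0x1e0 decodes as source = (0x1e0 >> 5) & 0xf =
 * 0xf (the global interface) and code = 0x1e0 & 0x1f = 0x0, i.e. the
 * cci_snoop_access_filter_bank_0_1 global event listed below.
 */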

/* Port ids */
#define CCI5xx_PORT_S0			0x0
#define CCI5xx_PORT_S1			0x1
#define CCI5xx_PORT_S2			0x2
#define CCI5xx_PORT_S3			0x3
#define CCI5xx_PORT_S4			0x4
#define CCI5xx_PORT_S5			0x5
#define CCI5xx_PORT_S6			0x6

#define CCI5xx_PORT_M0			0x8
#define CCI5xx_PORT_M1			0x9
#define CCI5xx_PORT_M2			0xa
#define CCI5xx_PORT_M3			0xb
#define CCI5xx_PORT_M4			0xc
#define CCI5xx_PORT_M5			0xd
#define CCI5xx_PORT_M6			0xe

#define CCI5xx_PORT_GLOBAL		0xf

#define CCI5xx_PMU_EVENT_MASK		0x1ffUL
#define CCI5xx_PMU_EVENT_SOURCE_SHIFT	0x5
#define CCI5xx_PMU_EVENT_SOURCE_MASK	0xf
#define CCI5xx_PMU_EVENT_CODE_SHIFT	0x0
#define CCI5xx_PMU_EVENT_CODE_MASK	0x1f

#define CCI5xx_PMU_EVENT_SOURCE(event)	\
	((event >> CCI5xx_PMU_EVENT_SOURCE_SHIFT) & CCI5xx_PMU_EVENT_SOURCE_MASK)
#define CCI5xx_PMU_EVENT_CODE(event)	\
	((event >> CCI5xx_PMU_EVENT_CODE_SHIFT) & CCI5xx_PMU_EVENT_CODE_MASK)

#define CCI5xx_SLAVE_PORT_MIN_EV	0x00
#define CCI5xx_SLAVE_PORT_MAX_EV	0x1f
#define CCI5xx_MASTER_PORT_MIN_EV	0x00
#define CCI5xx_MASTER_PORT_MAX_EV	0x06
#define CCI5xx_GLOBAL_PORT_MIN_EV	0x00
#define CCI5xx_GLOBAL_PORT_MAX_EV	0x0f

#define CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci5xx_pmu_global_event_show, \
					(unsigned long) _config)

static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
				struct device_attribute *attr, char *buf);

static struct attribute *cci5xx_pmu_format_attrs[] = {
	CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
	CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"),
	NULL,
};

static struct attribute *cci5xx_pmu_event_attrs[] = {
	/* Slave events */
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict, 0x11),
	CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat, 0x12),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid, 0x13),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read, 0x14),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean, 0x15),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low, 0x16),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid, 0x17),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall, 0x18),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall, 0x19),
	CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall, 0x1A),
	CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall, 0x1B),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall, 0x1C),
	CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall, 0x1D),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit, 0x1E),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit, 0x1F),

	/* Master events */
	CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any, 0x1),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6),

	/* Global events */
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
	NULL
};

static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
					struct dev_ext_attribute, attr);
	/* Global events have single fixed source code */
	return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n",
				(unsigned long)eattr->var, CCI5xx_PORT_GLOBAL);
}
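
/*
 * So reading, e.g., the cci_snoop_access_filter_bank_0_1 attribute returns
 * "event=0x0,source=0xf\n": the global interface id is baked in and the
 * user only supplies the event code.
 */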

/*
 * CCI500 provides 8 independent event counters that can count
 * any of the events available.
 * CCI500 PMU event source ids
 *	0x0-0x6 - Slave interfaces
 *	0x8-0xD - Master interfaces
 *	0xf     - Global Events
 *	0x7,0xe - Reserved
 */
static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
					unsigned long hw_event)
{
	u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
	u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
	int if_type;

	if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
		return -ENOENT;

	switch (ev_source) {
	case CCI5xx_PORT_S0:
	case CCI5xx_PORT_S1:
	case CCI5xx_PORT_S2:
	case CCI5xx_PORT_S3:
	case CCI5xx_PORT_S4:
	case CCI5xx_PORT_S5:
	case CCI5xx_PORT_S6:
		if_type = CCI_IF_SLAVE;
		break;
	case CCI5xx_PORT_M0:
	case CCI5xx_PORT_M1:
	case CCI5xx_PORT_M2:
	case CCI5xx_PORT_M3:
	case CCI5xx_PORT_M4:
	case CCI5xx_PORT_M5:
		if_type = CCI_IF_MASTER;
		break;
	case CCI5xx_PORT_GLOBAL:
		if_type = CCI_IF_GLOBAL;
		break;
	default:
		return -ENOENT;
	}

	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
		ev_code <= cci_pmu->model->event_ranges[if_type].max)
		return hw_event;

	return -ENOENT;
}
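
/*
 * Example of the range check above: for CCI500 the master event range is
 * 0x00-0x06, so a request for source M0 (0x8) with event code 0x7, i.e.
 * config = (0x8 << 5) | 0x7 = 0x107, is rejected with -ENOENT.
 */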

/*
 * CCI550 provides 8 independent event counters that can count
 * any of the events available.
 * CCI550 PMU event source ids
 *	0x0-0x6 - Slave interfaces
 *	0x8-0xe - Master interfaces
 *	0xf     - Global Events
 *	0x7	- Reserved
 */
static int cci550_validate_hw_event(struct cci_pmu *cci_pmu,
					unsigned long hw_event)
{
	u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
	u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
	int if_type;

	if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
		return -ENOENT;

	switch (ev_source) {
	case CCI5xx_PORT_S0:
	case CCI5xx_PORT_S1:
	case CCI5xx_PORT_S2:
	case CCI5xx_PORT_S3:
	case CCI5xx_PORT_S4:
	case CCI5xx_PORT_S5:
	case CCI5xx_PORT_S6:
		if_type = CCI_IF_SLAVE;
		break;
	case CCI5xx_PORT_M0:
	case CCI5xx_PORT_M1:
	case CCI5xx_PORT_M2:
	case CCI5xx_PORT_M3:
	case CCI5xx_PORT_M4:
	case CCI5xx_PORT_M5:
	case CCI5xx_PORT_M6:
		if_type = CCI_IF_MASTER;
		break;
	case CCI5xx_PORT_GLOBAL:
		if_type = CCI_IF_GLOBAL;
		break;
	default:
		return -ENOENT;
	}

	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
		ev_code <= cci_pmu->model->event_ranges[if_type].max)
		return hw_event;

	return -ENOENT;
}

#endif	/* CONFIG_ARM_CCI5xx_PMU */

/*
 * Program the CCI PMU counters which have PERF_HES_ARCH set
 * with the event period and mark them ready before we enable
 * the PMU.
 */
static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
{
	int i;
	struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
	DECLARE_BITMAP(mask, HW_CNTRS_MAX);

	bitmap_zero(mask, cci_pmu->num_cntrs);
	for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
		struct perf_event *event = cci_hw->events[i];

		if (WARN_ON(!event))
			continue;

		/* Leave the events which are not counting */
		if (event->hw.state & PERF_HES_STOPPED)
			continue;
		if (event->hw.state & PERF_HES_ARCH) {
			set_bit(i, mask);
			event->hw.state &= ~PERF_HES_ARCH;
		}
	}

	pmu_write_counters(cci_pmu, mask);
}
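
/*
 * PERF_HES_ARCH is used here as a driver-private flag: elsewhere in this
 * driver it marks events whose period was (re)programmed while the PMU was
 * disabled, so that cci_pmu_sync_counters() can write the pending counter
 * values in one batch via pmu_write_counters() just before the PMU is
 * re-enabled.
 */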
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) /* Should be called with cci_pmu->hw_events->pmu_lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	/* Enable all the PMU counters. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	writel(val, cci_pmu->ctrl_base + CCI_PMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) /* Should be called with cci_pmu->hw_events->pmu_lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	cci_pmu_sync_counters(cci_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	__cci_pmu_enable_nosync(cci_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) /* Should be called with cci_pmu->hw_events->pmu_lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) static void __cci_pmu_disable(struct cci_pmu *cci_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	/* Disable all the PMU counters. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	writel(val, cci_pmu->ctrl_base + CCI_PMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) static ssize_t cci_pmu_format_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 			struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	struct dev_ext_attribute *eattr = container_of(attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 				struct dev_ext_attribute, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) static ssize_t cci_pmu_event_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 			struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	struct dev_ext_attribute *eattr = container_of(attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 				struct dev_ext_attribute, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	/* source parameter is mandatory for normal PMU events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	return snprintf(buf, PAGE_SIZE, "source=?,event=0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 					 (unsigned long)eattr->var);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	return readl_relaxed(cci_pmu->base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 			     CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 			       int idx, unsigned int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	writel_relaxed(value, cci_pmu->base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		       CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) }
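
/*
 * A worked example of the per-counter window arithmetic above, with a
 * hypothetical index: for a model with cntr_size == SZ_4K, counter
 * idx 2 occupies the window at base + 0x2000, so its CCI_PMU_EVT_SEL
 * is at base + 0x2000, CCI_PMU_CNTR at base + 0x2004, CCI_PMU_CNTR_CTRL
 * at base + 0x2008 and CCI_PMU_OVRFLW at base + 0x200c.
 */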
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) static bool __maybe_unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752)  * For all counters on the CCI-PMU, disable any 'enabled' counters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753)  * recording the counters we changed in the mask so that we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754)  * restore them later using pmu_restore_counters(). The mask is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755)  * private to the caller. We cannot rely on the used_mask maintained
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756)  * by the CCI PMU, as it only tells us whether a counter is assigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757)  * to a perf_event. The state of the perf_event cannot be locked by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758)  * the PMU layer, hence we check the individual counter status (which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759)  * can be locked by cci_pmu->hw_events->pmu_lock).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761)  * @mask should be initialised to empty by the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) static void __maybe_unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	for (i = 0; i < cci_pmu->num_cntrs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		if (pmu_counter_is_enabled(cci_pmu, i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 			set_bit(i, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 			pmu_disable_counter(cci_pmu, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777)  * Restore the status of the counters: the reverse of pmu_save_counters().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778)  * For each counter set in the mask, re-enable the counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) static void __maybe_unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	for_each_set_bit(i, mask, cci_pmu->num_cntrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		pmu_enable_counter(cci_pmu, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) }
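
/*
 * A minimal sketch of the save/restore idiom the two helpers above are
 * designed for, mirroring cci5xx_pmu_write_counters() below. It assumes
 * the caller already holds hw_events->pmu_lock and keeps the PMU
 * quiescent while the hardware is reprogrammed:
 *
 *	DECLARE_BITMAP(saved_mask, HW_CNTRS_MAX);
 *
 *	bitmap_zero(saved_mask, cci_pmu->num_cntrs);
 *	pmu_save_counters(cci_pmu, saved_mask);
 *	... reprogram the hardware while all counters are disabled ...
 *	pmu_restore_counters(cci_pmu, saved_mask);
 */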
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790)  * Returns the number of programmable counters actually implemented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791)  * by the CCI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) static u32 pmu_get_max_counters(struct cci_pmu *cci_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	return (readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) }
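
/*
 * A worked decode of the PMCR read above, using a hypothetical register
 * value: if CCI_PMCR reads 0x00002001, then
 * (0x00002001 & CCI_PMCR_NCNT_MASK) == 0x2000 and 0x2000 >> 11 == 4,
 * i.e. four programmable counters (the low bit is just CCI_PMCR_CEN).
 */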
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	unsigned long cci_event = event->hw.config_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	if (cci_pmu->model->get_event_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	/* Generic code to find an unused idx from the mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		if (!test_and_set_bit(idx, hw->used_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 			return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	/* No counters available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) static int pmu_map_event(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	if (event->attr.type < PERF_TYPE_MAX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			!cci_pmu->model->validate_hw_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	struct platform_device *pmu_device = cci_pmu->plat_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	if (unlikely(!pmu_device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	if (cci_pmu->nr_irqs < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	 * Register all available CCI PMU interrupts. In the interrupt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	 * we iterate over the counters, checking for the interrupt source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	 * (the overflowing counter), and clear it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	 * This should allow handling of shared (non-unique) interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	for (i = 0; i < cci_pmu->nr_irqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 				"arm-cci-pmu", cci_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 			dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 				cci_pmu->irqs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		set_bit(i, &cci_pmu->active_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) static void pmu_free_irq(struct cci_pmu *cci_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	for (i = 0; i < cci_pmu->nr_irqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		if (!test_and_clear_bit(i, &cci_pmu->active_irqs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		free_irq(cci_pmu->irqs[i], cci_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) static u32 pmu_read_counter(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	struct hw_perf_event *hw_counter = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	int idx = hw_counter->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	return value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		struct perf_event *event = cci_hw->events[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		if (WARN_ON(!event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	if (cci_pmu->model->write_counters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		cci_pmu->model->write_counters(cci_pmu, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		__pmu_write_counters(cci_pmu, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) #ifdef CONFIG_ARM_CCI5xx_PMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921)  * CCI-500/CCI-550 have advanced power saving policies, which can gate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922)  * the clocks to the PMU counters, making writes to them ineffective.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923)  * The only way to write to those counters is when the global counters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924)  * are enabled and the particular counter is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926)  * So we do the following:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928)  * 1) Disable all the PMU counters, saving their current state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929)  * 2) Enable the global PMU profiling, now that all counters are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930)  *    disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  * For each counter to be programmed, repeat steps 3-7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934)  * 3) Write an invalid event code to the event control register for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  *    counter, so that the counters are not modified.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  * 4) Enable the counter control for the counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  * 5) Set the counter value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  * 6) Disable the counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  * 7) Restore the event in the target counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941)  * 8) Disable the global PMU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942)  * 9) Restore the status of the rest of the counters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944)  * We choose an event which for CCI-5xx is guaranteed not to count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945)  * We use the highest possible event code (0x1f) for the master interface 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) #define CCI5xx_INVALID_EVENT	((CCI5xx_PORT_M0 << CCI5xx_PMU_EVENT_SOURCE_SHIFT) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 				 (CCI5xx_PMU_EVENT_CODE_MASK << CCI5xx_PMU_EVENT_CODE_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	DECLARE_BITMAP(saved_mask, HW_CNTRS_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	bitmap_zero(saved_mask, cci_pmu->num_cntrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	pmu_save_counters(cci_pmu, saved_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	 * Now that all the counters are disabled, we can safely turn the PMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	 * on without syncing the status of the counters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	__cci_pmu_enable_nosync(cci_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		struct perf_event *event = cci_pmu->hw_events.events[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		if (WARN_ON(!event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		pmu_enable_counter(cci_pmu, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		pmu_disable_counter(cci_pmu, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		pmu_set_event(cci_pmu, i, event->hw.config_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	__cci_pmu_disable(cci_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	pmu_restore_counters(cci_pmu, saved_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) #endif	/* CONFIG_ARM_CCI5xx_PMU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) static u64 pmu_event_update(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	struct hw_perf_event *hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	u64 delta, prev_raw_count, new_raw_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		prev_raw_count = local64_read(&hwc->prev_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		new_raw_count = pmu_read_counter(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		 new_raw_count) != prev_raw_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	local64_add(delta, &event->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	return new_raw_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) }
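
/*
 * The masked subtraction above handles 32-bit wrap-around. A worked
 * example with made-up values: if prev_raw_count == 0xfffffffe and the
 * counter has wrapped to new_raw_count == 0x00000001, then
 * (0x1 - 0xfffffffe) & CCI_PMU_CNTR_MASK == 3, the number of events
 * that actually occurred.
 */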
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) static void pmu_read(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	pmu_event_update(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) static void pmu_event_set_period(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	struct hw_perf_event *hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	 * The CCI PMU counters have a period of 2^32. To account for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	 * possibility of extreme interrupt latency we program for a period of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	 * half that. Hopefully we can handle the interrupt before another 2^31
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	 * events occur and the counter overtakes its previous value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	u64 val = 1ULL << 31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	local64_set(&hwc->prev_count, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	 * The CCI PMU uses PERF_HES_ARCH to keep track of the counters whose
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	 * values need to be synced with the s/w state before the PMU is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	 * enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	 * Mark this counter for sync.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	hwc->state |= PERF_HES_ARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
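
/*
 * A back-of-the-envelope check on that 2^31 margin, with an assumed
 * event rate for illustration: even at one counted event per
 * nanosecond, consuming 2^31 events takes roughly 2.1 seconds, so the
 * overflow interrupt has ample time to run before the counter catches
 * up with prev_count again.
 */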
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	struct cci_pmu *cci_pmu = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	int idx, handled = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	/* Disable the PMU while we walk through the counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	__cci_pmu_disable(cci_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	 * Iterate over counters and update the corresponding perf events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	 * This should work regardless of whether we have per-counter overflow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	 * interrupt or a combined overflow interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		struct perf_event *event = events->events[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		if (!event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		/* Did this counter overflow? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		      CCI_PMU_OVRFLW_FLAG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 							CCI_PMU_OVRFLW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		pmu_event_update(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		pmu_event_set_period(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		handled = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	/* Enable the PMU and sync possibly overflowed counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	__cci_pmu_enable_sync(cci_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		pmu_free_irq(cci_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	pmu_free_irq(cci_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) static void hw_perf_event_destroy(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	atomic_t *active_events = &cci_pmu->active_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		cci_pmu_put_hw(cci_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		mutex_unlock(reserve_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) static void cci_pmu_enable(struct pmu *pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	if (!enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	__cci_pmu_enable_sync(cci_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) static void cci_pmu_disable(struct pmu *pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	__cci_pmu_disable(cci_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)  * Check if the idx represents a non-programmable counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)  * All the fixed event counters are mapped before the programmable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)  * counters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
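
/*
 * For example, with the CCI-400 model below (one fixed cycle counter):
 * idx 0 is the fixed cycle counter and the programmable counters start
 * at idx 1. For a model whose fixed_hw_cntrs is zero this helper never
 * returns true.
 */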
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) static void cci_pmu_start(struct perf_event *event, int pmu_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	struct hw_perf_event *hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	int idx = hwc->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	 * To handle interrupt latency, we always reprogram the period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	 * regardless of PERF_EF_RELOAD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	if (pmu_flags & PERF_EF_RELOAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	hwc->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	/* Configure the counter unless you are counting a fixed event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	if (!pmu_fixed_hw_idx(cci_pmu, idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		pmu_set_event(cci_pmu, idx, hwc->config_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	pmu_event_set_period(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	pmu_enable_counter(cci_pmu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	struct hw_perf_event *hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	int idx = hwc->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	if (hwc->state & PERF_HES_STOPPED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	 * cci_pmu_start()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	pmu_disable_counter(cci_pmu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	pmu_event_update(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) static int cci_pmu_add(struct perf_event *event, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	struct hw_perf_event *hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	/* If we don't have space for the counter, finish early. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	idx = pmu_get_event_idx(hw_events, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	if (idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	event->hw.idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	hw_events->events[idx] = event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	if (flags & PERF_EF_START)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		cci_pmu_start(event, PERF_EF_RELOAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	/* Propagate our changes to the userspace mapping. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	perf_event_update_userpage(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) static void cci_pmu_del(struct perf_event *event, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	struct hw_perf_event *hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	int idx = hwc->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	cci_pmu_stop(event, PERF_EF_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	hw_events->events[idx] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	clear_bit(idx, hw_events->used_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	perf_event_update_userpage(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) static int validate_event(struct pmu *cci_pmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 			  struct cci_pmu_hw_events *hw_events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 			  struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	if (is_software_event(event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	 * core perf code won't check that the pmu->ctx == leader->ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	 * until after pmu->event_init(event).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	if (event->pmu != cci_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	if (event->state < PERF_EVENT_STATE_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	return pmu_get_event_idx(hw_events, event) >= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) static int validate_group(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	struct perf_event *sibling, *leader = event->group_leader;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	unsigned long mask[BITS_TO_LONGS(HW_CNTRS_MAX)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	struct cci_pmu_hw_events fake_pmu = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		 * Initialise the fake PMU. We only need to populate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		 * used_mask for the purposes of validation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		.used_mask = mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	if (!validate_event(event->pmu, &fake_pmu, leader))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	for_each_sibling_event(sibling, leader) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		if (!validate_event(event->pmu, &fake_pmu, sibling))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	if (!validate_event(event->pmu, &fake_pmu, event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
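
/*
 * A concrete illustration of the check above, with a hypothetical
 * counter budget: on a model exposing five counters in total, a group
 * of six CCI events passes validate_event() for the first five but
 * fails on the sixth, since pmu_get_event_idx() finds no free bit in
 * the fake used_mask; the group is rejected with -EINVAL at init time
 * rather than silently failing to schedule later.
 */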
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) static int __hw_perf_event_init(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	struct hw_perf_event *hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	int mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	mapping = pmu_map_event(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	if (mapping < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		pr_debug("event %x:%llx not supported\n", event->attr.type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 			 event->attr.config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		return mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	 * We don't assign an index until we actually place the event onto
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	 * hardware. Use -1 to signify that we haven't decided where to put it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	 * yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	hwc->idx		= -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	hwc->config_base	= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	hwc->config		= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	hwc->event_base		= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	 * Store the event encoding into the config_base field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	hwc->config_base	    |= (unsigned long)mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	if (event->group_leader != event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		if (validate_group(event) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) static int cci_pmu_event_init(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	atomic_t *active_events = &cci_pmu->active_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	if (event->attr.type != event->pmu->type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	/* Shared by all CPUs, no meaningful state to sample */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	 * Following the example set by other "uncore" PMUs, we accept any CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	 * and rewrite its affinity dynamically rather than having perf core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	 * handle cpu == -1 and pid == -1 for this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	 * The perf core will pin online CPUs for the duration of this call and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	 * the event being installed into its context, so the PMU's CPU can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	 * change under our feet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	if (event->cpu < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	event->cpu = cci_pmu->cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	event->destroy = hw_perf_event_destroy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	if (!atomic_inc_not_zero(active_events)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		mutex_lock(&cci_pmu->reserve_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		if (atomic_read(active_events) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 			err = cci_pmu_get_hw(cci_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 			atomic_inc(active_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		mutex_unlock(&cci_pmu->reserve_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	err = __hw_perf_event_init(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		hw_perf_event_destroy(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
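
/*
 * The affinity rewrite above means a request for any particular online
 * CPU is silently redirected. A hypothetical example: with
 * cci_pmu->cpu == 0, an event opened for cpu == 2 is accepted but is
 * actually programmed and read on CPU 0; only cpu < 0 is rejected.
 */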
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) static ssize_t pmu_cpumask_attr_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 				     struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	struct pmu *pmu = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	return cpumap_print_to_pagebuf(true, buf, cpumask_of(cci_pmu->cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) static struct device_attribute pmu_cpumask_attr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	__ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) static struct attribute *pmu_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	&pmu_cpumask_attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) static struct attribute_group pmu_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	.attrs = pmu_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) static struct attribute_group pmu_format_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	.name = "format",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	.attrs = NULL,		/* Filled in cci_pmu_init() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) static struct attribute_group pmu_event_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	.name = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	.attrs = NULL,		/* Filled in cci_pmu_init() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) static const struct attribute_group *pmu_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	&pmu_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	&pmu_format_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	&pmu_event_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) };
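
/*
 * Once the PMU is registered, these groups surface under sysfs. An
 * illustrative layout, using the "CCI_400" model name from below:
 *
 *	/sys/bus/event_source/devices/CCI_400/cpumask
 *	/sys/bus/event_source/devices/CCI_400/format/...
 *	/sys/bus/event_source/devices/CCI_400/events/...
 */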
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	const struct cci_pmu_model *model = cci_pmu->model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	char *name = model->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	u32 num_cntrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	if (WARN_ON(model->num_hw_cntrs > NUM_HW_CNTRS_MAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	if (WARN_ON(model->fixed_hw_cntrs > FIXED_HW_CNTRS_MAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	pmu_event_attr_group.attrs = model->event_attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	pmu_format_attr_group.attrs = model->format_attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	cci_pmu->pmu = (struct pmu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		.module		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		.name		= cci_pmu->model->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		.task_ctx_nr	= perf_invalid_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		.pmu_enable	= cci_pmu_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		.pmu_disable	= cci_pmu_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		.event_init	= cci_pmu_event_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		.add		= cci_pmu_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		.del		= cci_pmu_del,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		.start		= cci_pmu_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		.stop		= cci_pmu_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		.read		= pmu_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		.attr_groups	= pmu_attr_groups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	cci_pmu->plat_device = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	num_cntrs = pmu_get_max_counters(cci_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	if (num_cntrs > cci_pmu->model->num_hw_cntrs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		dev_warn(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 			"PMU implements more counters (%d) than supported by"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 			" the model (%d), truncated.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 			num_cntrs, cci_pmu->model->num_hw_cntrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 		num_cntrs = cci_pmu->model->num_hw_cntrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	return perf_pmu_register(&cci_pmu->pmu, name, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
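
/*
 * A worked example of the sizing above, assuming the CCI-400 r0 model
 * below with hardware reporting four programmable counters in PMCR:
 * num_cntrs stays 4 and cci_pmu->num_cntrs becomes 4 plus the fixed
 * cycle counter, i.e. 5. Had PMCR reported more than the model's
 * num_hw_cntrs, the excess would have been truncated with a warning.
 */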
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) static int cci_pmu_offline_cpu(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	int target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	if (!g_cci_pmu || cpu != g_cci_pmu->cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	target = cpumask_any_but(cpu_online_mask, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	if (target >= nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	perf_pmu_migrate_context(&g_cci_pmu->pmu, cpu, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	g_cci_pmu->cpu = target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) #ifdef CONFIG_ARM_CCI400_PMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	[CCI400_R0] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		.name = "CCI_400",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		.num_hw_cntrs = NUM_HW_CNTRS_CII_4XX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		.cntr_size = SZ_4K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		.format_attrs = cci400_pmu_format_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		.event_attrs = cci400_r0_pmu_event_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		.event_ranges = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 			[CCI_IF_SLAVE] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 				CCI400_R0_SLAVE_PORT_MIN_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 				CCI400_R0_SLAVE_PORT_MAX_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 			[CCI_IF_MASTER] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 				CCI400_R0_MASTER_PORT_MIN_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 				CCI400_R0_MASTER_PORT_MAX_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		.validate_hw_event = cci400_validate_hw_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		.get_event_idx = cci400_get_event_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	[CCI400_R1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		.name = "CCI_400_r1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		.num_hw_cntrs = NUM_HW_CNTRS_CII_4XX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		.cntr_size = SZ_4K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		.format_attrs = cci400_pmu_format_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		.event_attrs = cci400_r1_pmu_event_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		.event_ranges = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 			[CCI_IF_SLAVE] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 				CCI400_R1_SLAVE_PORT_MIN_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 				CCI400_R1_SLAVE_PORT_MAX_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 			[CCI_IF_MASTER] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 				CCI400_R1_MASTER_PORT_MIN_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 				CCI400_R1_MASTER_PORT_MAX_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 		.validate_hw_event = cci400_validate_hw_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		.get_event_idx = cci400_get_event_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) #ifdef CONFIG_ARM_CCI5xx_PMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	[CCI500_R0] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		.name = "CCI_500",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		.num_hw_cntrs = NUM_HW_CNTRS_CII_5XX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		.cntr_size = SZ_64K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		.format_attrs = cci5xx_pmu_format_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		.event_attrs = cci5xx_pmu_event_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		.event_ranges = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 			[CCI_IF_SLAVE] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 				CCI5xx_SLAVE_PORT_MIN_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 				CCI5xx_SLAVE_PORT_MAX_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 			[CCI_IF_MASTER] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 				CCI5xx_MASTER_PORT_MIN_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 				CCI5xx_MASTER_PORT_MAX_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 			[CCI_IF_GLOBAL] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 				CCI5xx_GLOBAL_PORT_MIN_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 				CCI5xx_GLOBAL_PORT_MAX_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		.validate_hw_event = cci500_validate_hw_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		.write_counters	= cci5xx_pmu_write_counters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	[CCI550_R0] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		.name = "CCI_550",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		.num_hw_cntrs = NUM_HW_CNTRS_CII_5XX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		.cntr_size = SZ_64K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		.format_attrs = cci5xx_pmu_format_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		.event_attrs = cci5xx_pmu_event_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		.event_ranges = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 			[CCI_IF_SLAVE] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 				CCI5xx_SLAVE_PORT_MIN_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 				CCI5xx_SLAVE_PORT_MAX_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 			[CCI_IF_MASTER] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 				CCI5xx_MASTER_PORT_MIN_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 				CCI5xx_MASTER_PORT_MAX_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 			[CCI_IF_GLOBAL] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 				CCI5xx_GLOBAL_PORT_MIN_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 				CCI5xx_GLOBAL_PORT_MAX_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 			},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		.validate_hw_event = cci550_validate_hw_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		.write_counters	= cci5xx_pmu_write_counters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
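/*
 * Entries with .data == NULL (the bare "arm,cci-400-pmu" compatible)
 * leave the model undetermined; cci_pmu_alloc() then falls back to
 * probing the revision from the CCI ID registers, which requires
 * secure access -- hence the DEPRECATED warning there.
 */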
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) static const struct of_device_id arm_cci_pmu_matches[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) #ifdef CONFIG_ARM_CCI400_PMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		.compatible = "arm,cci-400-pmu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		.data	= NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		.compatible = "arm,cci-400-pmu,r0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		.data	= &cci_pmu_models[CCI400_R0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 		.compatible = "arm,cci-400-pmu,r1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 		.data	= &cci_pmu_models[CCI400_R1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) #ifdef CONFIG_ARM_CCI5xx_PMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		.compatible = "arm,cci-500-pmu,r0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		.data = &cci_pmu_models[CCI500_R0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		.compatible = "arm,cci-550-pmu,r0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		.data = &cci_pmu_models[CCI550_R0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) MODULE_DEVICE_TABLE(of, arm_cci_pmu_matches);
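
/*
 * For illustration only (all addresses and interrupt numbers below are
 * hypothetical; see Documentation/devicetree/bindings/arm/cci.txt for
 * the authoritative binding): a node this table would match, with one
 * interrupt per counter (the cycle counter plus four programmable
 * counters on CCI-400):
 *
 *	cci@2c090000 {
 *		compatible = "arm,cci-400";
 *		reg = <0x2c090000 0x1000>;
 *		ranges = <0x0 0x2c090000 0x10000>;
 *
 *		pmu@9000 {
 *			compatible = "arm,cci-400-pmu,r1";
 *			reg = <0x9000 0x5000>;
 *			interrupts = <0 101 4>, <0 102 4>, <0 103 4>,
 *				     <0 104 4>, <0 105 4>;
 *		};
 *	};
 */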
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
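/* Return true if @irq already appears in the first @nr_irqs entries of @irqs. */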
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	for (i = 0; i < nr_irqs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		if (irq == irqs[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) static struct cci_pmu *cci_pmu_alloc(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	struct cci_pmu *cci_pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	const struct cci_pmu_model *model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	 * All allocations use devm_*, so nothing has to be freed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	 * explicitly on an error path; everything is released
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	 * automatically on driver detach.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	cci_pmu = devm_kzalloc(dev, sizeof(*cci_pmu), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	if (!cci_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 
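	/*
	 * The parent CCI bus driver (drivers/bus/arm-cci.c) hands over a
	 * pointer to its mapped control registers via platform_data;
	 * probe_cci_model() reads the peripheral ID (CCI_PID2) through
	 * this mapping when the compatible string does not identify the
	 * revision.
	 */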
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	cci_pmu->ctrl_base = *(void __iomem **)dev->platform_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	model = of_device_get_match_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	if (!model) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 		dev_warn(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 			 "DEPRECATED compatible property, requires secure access to CCI registers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		model = probe_cci_model(cci_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	if (!model) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		dev_warn(dev, "CCI PMU version not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 		return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	cci_pmu->model = model;
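	/*
	 * Size the per-counter state for the worst case: all programmable
	 * counters plus any fixed (cycle) counter, see
	 * CCI_PMU_MAX_HW_CNTRS().
	 */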
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	cci_pmu->irqs = devm_kcalloc(dev, CCI_PMU_MAX_HW_CNTRS(model),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 					sizeof(*cci_pmu->irqs), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	if (!cci_pmu->irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	cci_pmu->hw_events.events = devm_kcalloc(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 					     CCI_PMU_MAX_HW_CNTRS(model),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 					     sizeof(*cci_pmu->hw_events.events),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 					     GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	if (!cci_pmu->hw_events.events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	cci_pmu->hw_events.used_mask = devm_kcalloc(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 						BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 						sizeof(*cci_pmu->hw_events.used_mask),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 						GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	if (!cci_pmu->hw_events.used_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	return cci_pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) static int cci_pmu_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	struct cci_pmu *cci_pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	int i, ret, irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	cci_pmu = cci_pmu_alloc(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	if (IS_ERR(cci_pmu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		return PTR_ERR(cci_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	cci_pmu->base = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	if (IS_ERR(cci_pmu->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		return PTR_ERR(cci_pmu->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	 * The CCI PMU has one overflow interrupt per counter, but several
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	 * counters may share a combined interrupt line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	cci_pmu->nr_irqs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		irq = platform_get_irq(pdev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		cci_pmu->irqs[cci_pmu->nr_irqs++] = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	 * Ensure the device tree provided as many interrupts as counters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	 * shared lines were collapsed above, so compare 'i', not nr_irqs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 			i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	mutex_init(&cci_pmu->reserve_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	atomic_set(&cci_pmu->active_events, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
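	/*
	 * The CCI PMU counts interconnect-wide (uncore) events, so all
	 * events are serviced on one nominated CPU; the hotplug callback
	 * registered below migrates the perf context elsewhere if that
	 * CPU goes offline (see cci_pmu_offline_cpu()).
	 */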
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	cci_pmu->cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	g_cci_pmu = cci_pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 				  "perf/arm/cci:online", NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 				  cci_pmu_offline_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	ret = cci_pmu_init(cci_pmu, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 		goto error_pmu_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	pr_info("ARM %s PMU driver probed\n", cci_pmu->model->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) error_pmu_init:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	g_cci_pmu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
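/*
 * Tear down in reverse probe order: remove the hotplug callback first so
 * cci_pmu_offline_cpu() can no longer run, then unregister the PMU. All
 * memory was allocated with devm_* and is freed on detach.
 */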
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) static int cci_pmu_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	if (!g_cci_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	perf_pmu_unregister(&g_cci_pmu->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	g_cci_pmu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) static struct platform_driver cci_pmu_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		   .name = DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		   .of_match_table = arm_cci_pmu_matches,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 		   .suppress_bind_attrs = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		  },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	.probe = cci_pmu_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	.remove = cci_pmu_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) module_platform_driver(cci_pmu_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) MODULE_DESCRIPTION("ARM CCI PMU support");
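
/*
 * Illustrative usage from userspace (hypothetical; the exact event names
 * come from the model's event_attrs and are listed under
 * /sys/bus/event_source/devices/<pmu>/events/, where <pmu> matches the
 * model name, e.g. "CCI_400"):
 *
 *	perf stat -a -e CCI_400/cycles/ -- sleep 1
 */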