Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

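The listing below is the ARM PMU perf framework driver (drivers/perf/arm_pmu.c in the standard kernel layout) as shipped in this tree.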
// SPDX-License-Identifier: GPL-2.0-only
#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>

static int armpmu_count_irq_users(const int irq);

struct pmu_irq_ops {
	void (*enable_pmuirq)(unsigned int irq);
	void (*disable_pmuirq)(unsigned int irq);
	void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
};
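/*
 * Each CPU's PMU interrupt is driven through one of the four pmu_irq_ops
 * implementations below, picked in armpmu_request_irq(): plain IRQ or
 * pseudo-NMI, each in a normal and a percpu-devid flavour. This keeps the
 * hotplug and teardown paths identical regardless of which kind of
 * interrupt the platform actually gave us.
 */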

static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
{
	free_irq(irq, per_cpu_ptr(devid, cpu));
}

static const struct pmu_irq_ops pmuirq_ops = {
	.enable_pmuirq = enable_irq,
	.disable_pmuirq = disable_irq_nosync,
	.free_pmuirq = armpmu_free_pmuirq
};

static void armpmu_free_pmunmi(unsigned int irq, int cpu, void __percpu *devid)
{
	free_nmi(irq, per_cpu_ptr(devid, cpu));
}

static const struct pmu_irq_ops pmunmi_ops = {
	.enable_pmuirq = enable_nmi,
	.disable_pmuirq = disable_nmi_nosync,
	.free_pmuirq = armpmu_free_pmunmi
};

static void armpmu_enable_percpu_pmuirq(unsigned int irq)
{
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
				   void __percpu *devid)
{
	if (armpmu_count_irq_users(irq) == 1)
		free_percpu_irq(irq, devid);
}

static const struct pmu_irq_ops percpu_pmuirq_ops = {
	.enable_pmuirq = armpmu_enable_percpu_pmuirq,
	.disable_pmuirq = disable_percpu_irq,
	.free_pmuirq = armpmu_free_percpu_pmuirq
};

static void armpmu_enable_percpu_pmunmi(unsigned int irq)
{
	if (!prepare_percpu_nmi(irq))
		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
}

static void armpmu_disable_percpu_pmunmi(unsigned int irq)
{
	disable_percpu_nmi(irq);
	teardown_percpu_nmi(irq);
}

static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
				      void __percpu *devid)
{
	if (armpmu_count_irq_users(irq) == 1)
		free_percpu_nmi(irq, devid);
}

static const struct pmu_irq_ops percpu_pmunmi_ops = {
	.enable_pmuirq = armpmu_enable_percpu_pmunmi,
	.disable_pmuirq = armpmu_disable_percpu_pmunmi,
	.free_pmuirq = armpmu_free_percpu_pmunmi
};

static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);
static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);
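/*
 * Per-CPU bookkeeping: which arm_pmu owns this CPU, which IRQ number it
 * was given, and which of the pmu_irq_ops above manages that IRQ. A shared
 * percpu-devid interrupt shows up in cpu_irq on every CPU it serves, which
 * is what armpmu_count_irq_users() relies on.
 */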

static bool has_nmi;

static inline u64 arm_pmu_event_max_period(struct perf_event *event)
{
	if (event->hw.flags & ARMPMU_EVT_64BIT)
		return GENMASK_ULL(63, 0);
	else
		return GENMASK_ULL(31, 0);
}
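/*
 * Illustration: most ARM PMU counters are 32 bits wide, so the mask is
 * GENMASK_ULL(31, 0) == 0xffffffff; events flagged ARMPMU_EVT_64BIT
 * (e.g. a 64-bit cycle counter) get the full 64-bit mask instead.
 */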

static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	if (!cache_map)
		return -ENOENT;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}
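/*
 * The perf ABI packs cache events as (type | op << 8 | result << 16).
 * Worked example: L1D read misses combine PERF_COUNT_HW_CACHE_L1D (0),
 * PERF_COUNT_HW_CACHE_OP_READ (0) and PERF_COUNT_HW_CACHE_RESULT_MISS (1),
 * i.e. attr.config == 0x10000, which indexes the per-PMU cache_map to
 * yield the hardware event number.
 */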

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	if (!event_map)
		return -ENOENT;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}
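/*
 * The first check matches the dynamic PMU type the core allocated for this
 * PMU instance, so raw events aimed at a specific PMU (as on big.LITTLE
 * systems, where each cluster registers its own arm_pmu) take priority
 * over the generic PERF_TYPE_* encodings handled below it.
 */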

int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	u64 max_period;
	int ret = 0;

	max_period = arm_pmu_event_max_period(event);
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (max_period >> 1))
		left = (max_period >> 1);

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & max_period);

	perf_event_update_userpage(event);

	return ret;
}
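/*
 * The counter is programmed with the negated period so that it overflows
 * after exactly "left" increments. Worked example for a 32-bit counter
 * with left == 1000: we write -1000 & 0xffffffff == 0xfffffc18, and the
 * overflow interrupt fires 1000 events later.
 */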

u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;
	u64 max_period = arm_pmu_event_max_period(event);

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
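/*
 * Masking the difference with max_period makes the subtraction safe across
 * a counter wrap: with a 32-bit counter, prev == 0xfffffff0 and
 * new == 0x10 gives (0x10 - 0xfffffff0) & 0xffffffff == 0x20, i.e. 32
 * events. The cmpxchg loop retries if an overflow handler updated
 * prev_count underneath us.
 */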

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	armpmu->clear_event_idx(hw_events, event);
	perf_event_update_userpage(event);
	/* Clear the allocated counter */
	hwc->idx = -1;
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}
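/*
 * add/del are invoked by the perf core when an event is scheduled onto or
 * off this CPU's PMU; start/stop toggle counting for an event that stays
 * scheduled (e.g. for throttling). The PERF_HES_* state bits record
 * whether the counter is stopped and whether event->count is up to date.
 */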

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
			       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	for_each_sibling_event(sibling, leader) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}
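/*
 * Group validation dry-runs counter allocation: each validate_event()
 * call asks get_event_idx() to claim a slot in the fake used_mask, so a
 * group whose leader, siblings and new member cannot all hold a counter
 * simultaneously is rejected at event_init time rather than silently
 * failing to schedule later.
 */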

static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;
	if (WARN_ON_ONCE(!armpmu))
		return IRQ_NONE;

	start_clock = sched_clock();
	ret = armpmu->handle_irq(armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	hwc->flags = 0;
	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if (armpmu->set_event_filter &&
	    armpmu->set_event_filter(hwc, &event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base	    |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = arm_pmu_event_max_period(event) >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	return __hw_perf_event_init(event);
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
	if (ret && armpmu->filter_match)
		return armpmu->filter_match(event);

	return ret;
}

static ssize_t armpmu_cpumask_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);

static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

static int armpmu_count_irq_users(const int irq)
{
	int cpu, count = 0;

	for_each_possible_cpu(cpu) {
		if (per_cpu(cpu_irq, cpu) == irq)
			count++;
	}

	return count;
}

static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
{
	const struct pmu_irq_ops *ops = NULL;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (per_cpu(cpu_irq, cpu) != irq)
			continue;

		ops = per_cpu(cpu_irq_ops, cpu);
		if (ops)
			break;
	}

	return ops;
}

void armpmu_free_irq(int irq, int cpu)
{
	if (per_cpu(cpu_irq, cpu) == 0)
		return;
	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
		return;

	per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);

	per_cpu(cpu_irq, cpu) = 0;
	per_cpu(cpu_irq_ops, cpu) = NULL;
}

int armpmu_request_irq(int irq, int cpu)
{
	int err = 0;
	const irq_handler_t handler = armpmu_dispatch_irq;
	const struct pmu_irq_ops *irq_ops;

	if (!irq)
		return 0;

	if (!irq_is_percpu_devid(irq)) {
		unsigned long irq_flags;

		err = irq_force_affinity(irq, cpumask_of(cpu));

		if (err && num_possible_cpus() > 1) {
			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
				irq, cpu);
			goto err_out;
		}

		irq_flags = IRQF_PERCPU |
			    IRQF_NOBALANCING |
			    IRQF_NO_THREAD;

		irq_set_status_flags(irq, IRQ_NOAUTOEN);

		err = request_nmi(irq, handler, irq_flags, "arm-pmu",
				  per_cpu_ptr(&cpu_armpmu, cpu));

		/* If cannot get an NMI, get a normal interrupt */
		if (err) {
			err = request_irq(irq, handler, irq_flags, "arm-pmu",
					  per_cpu_ptr(&cpu_armpmu, cpu));
			irq_ops = &pmuirq_ops;
		} else {
			has_nmi = true;
			irq_ops = &pmunmi_ops;
		}
	} else if (armpmu_count_irq_users(irq) == 0) {
		err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu);

		/* If cannot get an NMI, get a normal interrupt */
		if (err) {
			err = request_percpu_irq(irq, handler, "arm-pmu",
						 &cpu_armpmu);
			irq_ops = &percpu_pmuirq_ops;
		} else {
			has_nmi = true;
			irq_ops = &percpu_pmunmi_ops;
		}
	} else {
		/* Per cpudevid irq was already requested by another CPU */
		irq_ops = armpmu_find_irq_ops(irq);

		if (WARN_ON(!irq_ops))
			err = -EINVAL;
	}

	if (err)
		goto err_out;

	per_cpu(cpu_irq, cpu) = irq;
	per_cpu(cpu_irq_ops, cpu) = irq_ops;
	return 0;

err_out:
	pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
	return err;
}
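/*
 * Three request paths, matching the pmu_irq_ops variants above: an
 * ordinary per-CPU SPI is pinned to its CPU and requested as an NMI where
 * the irqchip supports pseudo-NMIs (falling back to a plain IRQ); a
 * percpu-devid PPI is requested once by its first user; later CPUs
 * sharing that PPI simply inherit the ops already chosen for it.
 */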
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	return per_cpu(hw_events->irq, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730)  * PMU hardware loses all context when a CPU goes offline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731)  * When a CPU is hotplugged back in, since some hardware registers are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732)  * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733)  * junk values out of them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	if (pmu->reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		pmu->reset(pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	per_cpu(cpu_armpmu, cpu) = pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	irq = armpmu_get_cpu_irq(pmu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	if (irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	irq = armpmu_get_cpu_irq(pmu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	if (irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	per_cpu(cpu_armpmu, cpu) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) #ifdef CONFIG_CPU_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	struct perf_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	for (idx = 0; idx < armpmu->num_events; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		event = hw_events->events[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		if (!event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		case CPU_PM_ENTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 			 * Stop and update the counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			armpmu_stop(event, PERF_EF_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		case CPU_PM_EXIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		case CPU_PM_ENTER_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 			 /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 			  * Restore and enable the counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 			  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			  * armpmu_start() indirectly calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 			  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 			  * perf_event_update_userpage()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 			  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 			  * which requires RCU read locking to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 			  * functional. Wrap the call in RCU_NONIDLE()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			  * so the RCU subsystem treats this CPU as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			  * non-idle for the duration of armpmu_start().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) }
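
/*
 * hw_events->events[] holds one perf_event pointer per hardware counter
 * index, so the walk above visits every counter slot and skips the unused
 * ones. An equivalent sketch using the allocation bitmap instead of the
 * NULL check would be:
 *
 *	int idx;
 *
 *	for_each_set_bit(idx, hw_events->used_mask, armpmu->num_events)
 *		armpmu_stop(hw_events->events[idx], PERF_EF_UPDATE);
 */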
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 			     void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	 * Always reset the PMU registers on power-up even if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	 * there are no events running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	if (cmd == CPU_PM_EXIT && armpmu->reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		armpmu->reset(armpmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	if (!enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	case CPU_PM_ENTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		armpmu->stop(armpmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		cpu_pm_pmu_setup(armpmu, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	case CPU_PM_EXIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	case CPU_PM_ENTER_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		cpu_pm_pmu_setup(armpmu, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		armpmu->start(armpmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) }
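
/*
 * These notifications come from the cpu_pm framework: a cpuidle or suspend
 * driver brackets a potentially power-gating low-power state with
 * cpu_pm_enter()/cpu_pm_exit(), which invoke the chain registered below.
 * A rough sketch of the caller side (enter_deep_idle_state() is
 * hypothetical):
 *
 *	if (!cpu_pm_enter()) {		// broadcasts CPU_PM_ENTER
 *		enter_deep_idle_state();
 *		cpu_pm_exit();		// broadcasts CPU_PM_EXIT
 *	}
 *
 * If any notifier refuses CPU_PM_ENTER, cpu_pm_enter() returns non-zero
 * and CPU_PM_ENTER_FAILED is broadcast instead, which is why that case is
 * handled like CPU_PM_EXIT above.
 */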
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 				       &cpu_pmu->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	err = cpu_pm_pmu_register(cpu_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		goto out_unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) out_unregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 					    &cpu_pmu->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) }
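
/*
 * Unlike the _nocalls variant used on the error path above,
 * cpuhp_state_add_instance() also invokes arm_perf_starting_cpu() on every
 * CPU that is already online, so the PMU IRQs are enabled as a side effect
 * of registration.
 */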
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	cpu_pm_pmu_unregister(cpu_pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 					    &cpu_pmu->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) static struct arm_pmu *__armpmu_alloc(gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	struct arm_pmu *pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	pmu = kzalloc(sizeof(*pmu), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	if (!pmu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		pr_info("failed to allocate PMU device!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	if (!pmu->hw_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		pr_info("failed to allocate per-cpu PMU data.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		goto out_free_pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	pmu->pmu = (struct pmu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		.pmu_enable	= armpmu_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		.pmu_disable	= armpmu_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		.event_init	= armpmu_event_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		.add		= armpmu_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		.del		= armpmu_del,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		.start		= armpmu_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		.stop		= armpmu_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		.read		= armpmu_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		.filter_match	= armpmu_filter_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		.attr_groups	= pmu->attr_groups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		 * This is a CPU PMU potentially in a heterogeneous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		 * configuration (e.g. big.LITTLE). This is not an uncore PMU,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		 * and we have taken ctx sharing into account (e.g. with our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		 * pmu::filter_match callback and pmu::event_init group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		 * validation).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		.capabilities	= PERF_PMU_CAP_HETEROGENEOUS_CPUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		&armpmu_common_attr_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		struct pmu_hw_events *events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		events = per_cpu_ptr(pmu->hw_events, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		raw_spin_lock_init(&events->pmu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		events->percpu_pmu = pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	return pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) out_free_pmu:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	kfree(pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) }
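
/*
 * Each possible CPU thus owns a pmu_hw_events carrying a back-pointer to
 * the arm_pmu, so code that is handed only the per-cpu data can recover
 * the owning PMU, roughly:
 *
 *	struct pmu_hw_events *hw = this_cpu_ptr(pmu->hw_events);
 *	struct arm_pmu *owner = hw->percpu_pmu;	// set in the loop above
 */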
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) struct arm_pmu *armpmu_alloc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	return __armpmu_alloc(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) struct arm_pmu *armpmu_alloc_atomic(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	return __armpmu_alloc(GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) }
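
/*
 * The GFP_ATOMIC variant is for callers that may need to allocate while
 * unable to sleep; the ACPI probe path, which runs from a hotplug
 * callback, is assumed to be the main user.
 */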
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) void armpmu_free(struct arm_pmu *pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	free_percpu(pmu->hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	kfree(pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) int armpmu_register(struct arm_pmu *pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	ret = cpu_pmu_init(pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (!pmu->set_event_filter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		goto out_destroy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
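	/*
	 * Record the first CPU PMU to register; the OProfile-era helpers
	 * (e.g. perf_num_counters()) are assumed to report its counters.
	 */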
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	if (!__oprofile_cpu_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		__oprofile_cpu_pmu = pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	pr_info("enabled with %s PMU driver, %d counters available%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		pmu->name, pmu->num_events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		has_nmi ? ", using NMIs" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) out_destroy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	cpu_pmu_destroy(pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) }
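
/*
 * A minimal sketch of how a PMU driver consumes the helpers above (the
 * name string and the set of callbacks to fill in are hypothetical; the
 * platform/ACPI glue normally does this work):
 *
 *	struct arm_pmu *pmu = armpmu_alloc();
 *	int ret;
 *
 *	if (!pmu)
 *		return -ENOMEM;
 *
 *	pmu->name = "my_cpu_pmu";	// hypothetical
 *	// ... fill in pmu->start, pmu->stop, pmu->reset, pmu->num_events,
 *	// pmu->supported_cpus and the per-CPU IRQ plumbing here ...
 *
 *	ret = armpmu_register(pmu);
 *	if (ret)
 *		armpmu_free(pmu);
 */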
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) static int arm_pmu_hp_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 				      "perf/arm/pmu:starting",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 				      arm_perf_starting_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 				      arm_perf_teardown_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		       ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) subsys_initcall(arm_pmu_hp_init);
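
/*
 * subsys_initcall() runs before the device/driver initcalls that probe
 * the individual PMU drivers, so the CPUHP multi-state registered here is
 * in place by the time cpu_pmu_init() first calls
 * cpuhp_state_add_instance().
 */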