Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Hardware performance events for the Alpha.
 *
 * We implement HW counts on the EV67 and subsequent CPUs only.
 *
 * (C) 2010 Michael J. Cree
 *
 * Somewhat based on the Sparc code, and to a lesser extent the PowerPC and
 * ARM code, which are copyright by their respective authors.
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/init.h>

#include <asm/hwrpb.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pal.h>
#include <asm/wrperfmon.h>
#include <asm/hw_irq.h>


/* The maximum number of PMCs on any Alpha CPU whatsoever. */
#define MAX_HWEVENTS 3
#define PMC_NO_INDEX -1

/* For tracking PMCs and the hw events they monitor on each CPU. */
struct cpu_hw_events {
	int			enabled;
	/* Number of events scheduled; also the number of entries valid in the arrays below. */
	int			n_events;
	/* Number of events added since the last hw_perf_disable(). */
	int			n_added;
	/* Events currently scheduled. */
	struct perf_event	*event[MAX_HWEVENTS];
	/* Event type of each scheduled event. */
	unsigned long		evtype[MAX_HWEVENTS];
	/* Current index of each scheduled event; if not yet determined
	 * contains PMC_NO_INDEX.
	 */
	int			current_idx[MAX_HWEVENTS];
	/* The active PMCs' config for easy use with wrperfmon(). */
	unsigned long		config;
	/* The active counters' indices for easy use with wrperfmon(). */
	unsigned long		idx_mask;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);



/*
 * A structure to hold the description of the PMCs available on a particular
 * type of Alpha CPU.
 */
struct alpha_pmu_t {
	/* Mapping of the perf system hw event types to indigenous event types */
	const int *event_map;
	/* The number of entries in the event_map */
	int  max_events;
	/* The number of PMCs on this Alpha */
	int  num_pmcs;
	/*
	 * All PMC counters reside in the IBOX register PCTR.  This gives
	 * the bit position of the LSB of each counter within PCTR.
	 */
	int  pmc_count_shift[MAX_HWEVENTS];
	/*
	 * The mask that isolates the PMC bits when the LSB of the counter
	 * is shifted to bit 0.
	 */
	unsigned long pmc_count_mask[MAX_HWEVENTS];
	/* The maximum period the PMC can count. */
	unsigned long pmc_max_period[MAX_HWEVENTS];
	/*
	 * The maximum value that may be written to the counter due to
	 * hardware restrictions is pmc_max_period - pmc_left.
	 */
	long pmc_left[3];
	/* Subroutine for allocation of PMCs.  Enforces constraints. */
	int (*check_constraints)(struct perf_event **, unsigned long *, int);
	/* Subroutine for checking validity of a raw event for this PMU. */
	int (*raw_event_valid)(u64 config);
};

/*
 * The Alpha CPU PMU description currently in operation.  This is set during
 * the boot process to the specific CPU of the machine.
 */
static const struct alpha_pmu_t *alpha_pmu;


#define HW_OP_UNSUPPORTED -1

/*
 * The hardware description of the EV67, EV68, EV69, EV7 and EV79 PMUs
 * follow. Since they are identical we refer to them collectively as the
 * EV67 henceforth.
 */

/*
 * EV67 PMC event types
 *
 * There is no one-to-one mapping of the possible hw event types to the
 * actual codes that are used to program the PMCs hence we introduce our
 * own hw event type identifiers.
 */
enum ev67_pmc_event_type {
	EV67_CYCLES = 1,
	EV67_INSTRUCTIONS,
	EV67_BCACHEMISS,
	EV67_MBOXREPLAY,
	EV67_LAST_ET
};
#define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES)


/* Mapping of the hw event types to the perf tool interface */
static const int ev67_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	 = EV67_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	 = EV67_INSTRUCTIONS,
	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	 = EV67_BCACHEMISS,
};

struct ev67_mapping_t {
	int config;
	int idx;
};

/*
 * The mapping used for one event only - these must be in the same order as
 * the enum ev67_pmc_event_type definition.
 */
static const struct ev67_mapping_t ev67_mapping[] = {
	{EV67_PCTR_INSTR_CYCLES, 1},	 /* EV67_CYCLES, */
	{EV67_PCTR_INSTR_CYCLES, 0},	 /* EV67_INSTRUCTIONS */
	{EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */
	{EV67_PCTR_CYCLES_MBOX, 1}	 /* EV67_MBOXREPLAY */
};

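/*
 * Summary of the EV67 scheduling constraints enforced below (derived from
 * ev67_check_constraints()): instructions may only be counted on PMC0;
 * Bcache misses and MBOX replay traps only on PMC1; cycles on either.
 * So the only valid two-event groups are {instructions, cycles},
 * {instructions, Bcache misses} and {cycles, MBOX replays}.
 */
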
/*
 * Check that a group of events can be simultaneously scheduled on to the
 * EV67 PMU.  Also allocate counter indices and config.
 */
static int ev67_check_constraints(struct perf_event **event,
				unsigned long *evtype, int n_ev)
{
	int idx0;
	unsigned long config;

	idx0 = ev67_mapping[evtype[0]-1].idx;
	config = ev67_mapping[evtype[0]-1].config;
	if (n_ev == 1)
		goto success;

	BUG_ON(n_ev != 2);

	if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) {
		/* MBOX replay traps must be on PMC 1 */
		idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0;
		/* Only cycles can accompany MBOX replay traps */
		if (evtype[idx0] == EV67_CYCLES) {
			config = EV67_PCTR_CYCLES_MBOX;
			goto success;
		}
	}

	if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) {
		/* Bcache misses must be on PMC 1 */
		idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0;
		/* Only instructions can accompany Bcache misses */
		if (evtype[idx0] == EV67_INSTRUCTIONS) {
			config = EV67_PCTR_INSTR_BCACHEMISS;
			goto success;
		}
	}

	if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) {
		/* Instructions must be on PMC 0 */
		idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1;
		/* By this point only cycles can accompany instructions */
		if (evtype[idx0^1] == EV67_CYCLES) {
			config = EV67_PCTR_INSTR_CYCLES;
			goto success;
		}
	}

	/* Otherwise, darn it, there is a conflict.  */
	return -1;

success:
	event[0]->hw.idx = idx0;
	event[0]->hw.config_base = config;
	if (n_ev == 2) {
		event[1]->hw.idx = idx0 ^ 1;
		event[1]->hw.config_base = config;
	}
	return 0;
}


static int ev67_raw_event_valid(u64 config)
{
	return config >= EV67_CYCLES && config < EV67_LAST_ET;
}


static const struct alpha_pmu_t ev67_pmu = {
	.event_map = ev67_perfmon_event_map,
	.max_events = ARRAY_SIZE(ev67_perfmon_event_map),
	.num_pmcs = 2,
	.pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0},
	.pmc_count_mask = {EV67_PCTR_0_COUNT_MASK,  EV67_PCTR_1_COUNT_MASK,  0},
	.pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
	.pmc_left = {16, 4, 0},
	.check_constraints = ev67_check_constraints,
	.raw_event_valid = ev67_raw_event_valid,
};
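
/*
 * Note: a pmc_max_period of (1UL<<20) - 1 means the two EV67 counters are
 * 20 bits wide, packed into the single PCTR register at the positions given
 * by pmc_count_shift[] (the exact layout constants live in asm/wrperfmon.h).
 * pmc_left[] is how close to overflow a counter may be started;
 * alpha_perf_event_set_period() below clamps the remaining count against it.
 */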


/*
 * Helper routines to ensure that we read/write only the correct PMC bits
 * when calling the wrperfmon PALcall.
 */
static inline void alpha_write_pmc(int idx, unsigned long val)
{
	val &= alpha_pmu->pmc_count_mask[idx];
	val <<= alpha_pmu->pmc_count_shift[idx];
	val |= (1<<idx);
	wrperfmon(PERFMON_CMD_WRITE, val);
}

static inline unsigned long alpha_read_pmc(int idx)
{
	unsigned long val;

	val = wrperfmon(PERFMON_CMD_READ, 0);
	val >>= alpha_pmu->pmc_count_shift[idx];
	val &= alpha_pmu->pmc_count_mask[idx];
	return val;
}
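
/*
 * Illustrative example (not in the original source): for the EV67's PMC1,
 * alpha_write_pmc(1, v) issues
 *	wrperfmon(PERFMON_CMD_WRITE,
 *		  ((v & EV67_PCTR_1_COUNT_MASK) << EV67_PCTR_1_COUNT_SHIFT) | 2);
 * where the low-order (1 << idx) bit presumably tells the PALcode which
 * PMC to load, and alpha_read_pmc(1) undoes the shift and mask.
 */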

/* Set a new period to sample over */
static int alpha_perf_event_set_period(struct perf_event *event,
				struct hw_perf_event *hwc, int idx)
{
	long left = local64_read(&hwc->period_left);
	long period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Hardware restrictions require that the counters must not be
	 * written with values that are too close to the maximum period.
	 */
	if (unlikely(left < alpha_pmu->pmc_left[idx]))
		left = alpha_pmu->pmc_left[idx];

	if (left > (long)alpha_pmu->pmc_max_period[idx])
		left = alpha_pmu->pmc_max_period[idx];

	local64_set(&hwc->prev_count, (unsigned long)(-left));

	alpha_write_pmc(idx, (unsigned long)(-left));

	perf_event_update_userpage(event);

	return ret;
}
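
/*
 * Why -left works: the PMCs count upward and interrupt on overflow, so
 * loading the counter with the two's complement of `left` (truncated to
 * the counter width by alpha_write_pmc()) makes it overflow after exactly
 * `left` more events.  prev_count is set to the same value so that
 * alpha_perf_event_update() computes the correct delta.
 */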


/*
 * Calculates the count (the 'delta') since the last time the PMC was read.
 *
 * As the PMCs' full period can easily be exceeded within the perf system
 * sampling period we cannot use any high order bits as a guard bit in the
 * PMCs to detect overflow as is done by other architectures.  The code here
 * calculates the delta on the basis that there is no overflow when ovf is
 * zero.  The value passed via ovf by the interrupt handler corrects for
 * overflow.
 *
 * This can be racy on rare occasions -- a call to this routine can occur
 * with an overflowed counter just before the PMI service routine is called.
 * The check for a negative delta hopefully always rectifies this situation.
 */
static unsigned long alpha_perf_event_update(struct perf_event *event,
					struct hw_perf_event *hwc, int idx, long ovf)
{
	long prev_raw_count, new_raw_count;
	long delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = alpha_read_pmc(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;

	/* It is possible on very rare occasions that the PMC has overflowed
	 * but the interrupt is yet to come.  Detect and fix this situation.
	 */
	if (unlikely(delta < 0)) {
		delta += alpha_pmu->pmc_max_period[idx] + 1;
	}

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
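
/*
 * Worked example (illustrative, 20-bit EV67 counter, ovf == 0): if the
 * masked prev_raw_count is 0xffff0 and the counter has wrapped to 0x00010,
 * delta = 0x10 - 0xffff0 < 0, and adding pmc_max_period + 1 (0x100000)
 * yields the true count of 0x20 events.
 */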


/*
 * Collect all HW events into the array event[].
 */
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *event[], unsigned long *evtype,
			  int *current_idx)
{
	struct perf_event *pe;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		event[n] = group;
		evtype[n] = group->hw.event_base;
		current_idx[n++] = PMC_NO_INDEX;
	}
	for_each_sibling_event(pe, group) {
		if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			event[n] = pe;
			evtype[n] = pe->hw.event_base;
			current_idx[n++] = PMC_NO_INDEX;
		}
	}
	return n;
}



/*
 * Check that a group of events can be simultaneously scheduled on to the PMU.
 */
static int alpha_check_constraints(struct perf_event **events,
				   unsigned long *evtypes, int n_ev)
{

	/* Having no HW events is possible from hw_perf_group_sched_in(). */
	if (n_ev == 0)
		return 0;

	if (n_ev > alpha_pmu->num_pmcs)
		return -1;

	return alpha_pmu->check_constraints(events, evtypes, n_ev);
}


/*
 * If new events have been scheduled then update cpuc with the new
 * configuration.  This may involve shifting cycle counts from one PMC to
 * another.
 */
static void maybe_change_configuration(struct cpu_hw_events *cpuc)
{
	int j;

	if (cpuc->n_added == 0)
		return;

	/* Find counters that are moving to another PMC and update */
	for (j = 0; j < cpuc->n_events; j++) {
		struct perf_event *pe = cpuc->event[j];

		if (cpuc->current_idx[j] != PMC_NO_INDEX &&
			cpuc->current_idx[j] != pe->hw.idx) {
			alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);
			cpuc->current_idx[j] = PMC_NO_INDEX;
		}
	}

	/* Assign to counters all unassigned events. */
	cpuc->idx_mask = 0;
	for (j = 0; j < cpuc->n_events; j++) {
		struct perf_event *pe = cpuc->event[j];
		struct hw_perf_event *hwc = &pe->hw;
		int idx = hwc->idx;

		if (cpuc->current_idx[j] == PMC_NO_INDEX) {
			alpha_perf_event_set_period(pe, hwc, idx);
			cpuc->current_idx[j] = idx;
		}

		if (!(hwc->state & PERF_HES_STOPPED))
			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
	}
	cpuc->config = cpuc->event[0]->hw.config_base;
}
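
/*
 * Note on maybe_change_configuration() above: the alpha_perf_event_update()
 * call must happen before an event moves, because hwc->prev_count still
 * refers to the PMC the event was last counting on.  The outstanding count
 * is folded into event->count before alpha_perf_event_set_period() programs
 * the new PMC.
 */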


/* Schedule perf HW event on to PMU.
 *  - this function is called from outside this module via the pmu struct
 *    returned from perf event initialisation.
 */
static int alpha_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int n0;
	int ret;
	unsigned long irq_flags;

	/*
	 * The Sparc code has the IRQ disable first followed by the perf
	 * disable, however this can lead to an overflowed counter with the
	 * PMI disabled on rare occasions.  The alpha_perf_event_update()
	 * routine should detect this situation by noting a negative delta,
	 * nevertheless we disable the PMCs first to enable a potential
	 * final PMI to occur before we disable interrupts.
	 */
	perf_pmu_disable(event->pmu);
	local_irq_save(irq_flags);

	/* Default to error to be returned */
	ret = -EAGAIN;

	/* Insert event on to PMU and if successful modify ret to valid return */
	n0 = cpuc->n_events;
	if (n0 < alpha_pmu->num_pmcs) {
		cpuc->event[n0] = event;
		cpuc->evtype[n0] = event->hw.event_base;
		cpuc->current_idx[n0] = PMC_NO_INDEX;

		if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
			cpuc->n_events++;
			cpuc->n_added++;
			ret = 0;
		}
	}

	hwc->state = PERF_HES_UPTODATE;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_STOPPED;

	local_irq_restore(irq_flags);
	perf_pmu_enable(event->pmu);

	return ret;
}


/* Remove a perf HW event from the PMU.
 *  - this function is called from outside this module via the pmu struct
 *    returned from perf event initialisation.
 */
static void alpha_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long irq_flags;
	int j;

	perf_pmu_disable(event->pmu);
	local_irq_save(irq_flags);

	for (j = 0; j < cpuc->n_events; j++) {
		if (event == cpuc->event[j]) {
			int idx = cpuc->current_idx[j];

			/* Shift remaining entries down into the existing
			 * slot.
			 */
			while (++j < cpuc->n_events) {
				cpuc->event[j - 1] = cpuc->event[j];
				cpuc->evtype[j - 1] = cpuc->evtype[j];
				cpuc->current_idx[j - 1] =
					cpuc->current_idx[j];
			}

			/* Absorb the final count and turn off the event. */
			alpha_perf_event_update(event, hwc, idx, 0);
			perf_event_update_userpage(event);

			cpuc->idx_mask &= ~(1UL<<idx);
			cpuc->n_events--;
			break;
		}
	}

	local_irq_restore(irq_flags);
	perf_pmu_enable(event->pmu);
}


static void alpha_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	alpha_perf_event_update(event, hwc, hwc->idx, 0);
}


static void alpha_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!(hwc->state & PERF_HES_STOPPED)) {
		cpuc->idx_mask &= ~(1UL<<hwc->idx);
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		alpha_perf_event_update(event, hwc, hwc->idx, 0);
		hwc->state |= PERF_HES_UPTODATE;
	}

	if (cpuc->enabled)
		wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
}


static void alpha_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
		alpha_perf_event_set_period(event, hwc, hwc->idx);
	}

	hwc->state = 0;

	cpuc->idx_mask |= 1UL<<hwc->idx;
	if (cpuc->enabled)
		wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
}


/*
 * Check that CPU performance counters are supported.
 * - currently support EV67 and later CPUs.
 * - actually some later revisions of the EV6 have the same PMC model as the
 *     EV67 but we don't do sufficiently deep CPU detection to detect them.
 *     Bad luck to the very few people who might have one, I guess.
 */
static int supported_cpu(void)
{
	struct percpu_struct *cpu;
	unsigned long cputype;

	/* Get cpu type from HW */
	cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
	cputype = cpu->type & 0xffffffff;
	/* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */
	return (cputype >= EV67_CPU) && (cputype <= EV69_CPU);
}



static void hw_perf_event_destroy(struct perf_event *event)
{
	/* Nothing to be done! */
	return;
}



static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *evts[MAX_HWEVENTS];
	unsigned long evtypes[MAX_HWEVENTS];
	int idx_rubbish_bin[MAX_HWEVENTS];
	int ev;
	int n;

	/* We only support a limited range of HARDWARE event types, and one
	 * of them (EV67_MBOXREPLAY) is programmable only via a RAW event type.
	 */
	if (attr->type == PERF_TYPE_HARDWARE) {
		if (attr->config >= alpha_pmu->max_events)
			return -EINVAL;
		ev = alpha_pmu->event_map[attr->config];
	} else if (attr->type == PERF_TYPE_HW_CACHE) {
		return -EOPNOTSUPP;
	} else if (attr->type == PERF_TYPE_RAW) {
		if (!alpha_pmu->raw_event_valid(attr->config))
			return -EINVAL;
		ev = attr->config;
	} else {
		return -EOPNOTSUPP;
	}

	if (ev < 0) {
		return ev;
	}

	/*
	 * We place the event type in event_base here and leave calculation
	 * of the codes to programme the PMU for alpha_pmu_enable() because
	 * it is only then we will know what HW events are actually
	 * scheduled on to the PMU.  At that point the code to programme the
	 * PMU is put into config_base and the PMC to use is placed into
	 * idx.  We initialise idx (below) to PMC_NO_INDEX to indicate that
	 * it is yet to be determined.
	 */
	hwc->event_base = ev;

	/* Collect events in a group together suitable for calling
	 * alpha_check_constraints() to verify that the group as a whole can
	 * be scheduled on to the PMU.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				alpha_pmu->num_pmcs - 1,
				evts, evtypes, idx_rubbish_bin);
		if (n < 0)
			return -EINVAL;
	}
	evtypes[n] = hwc->event_base;
	evts[n] = event;

	if (alpha_check_constraints(evts, evtypes, n + 1))
		return -EINVAL;

	/* Indicate that PMU config and idx are yet to be determined. */
	hwc->config_base = 0;
	hwc->idx = PMC_NO_INDEX;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Most architectures reserve the PMU for their use at this point.
	 * As there is no existing mechanism to arbitrate usage and there
	 * appears to be no other user of the Alpha PMU we just assume
	 * that we can just use it, hence a NO-OP here.
	 *
	 * Maybe an alpha_reserve_pmu() routine should be implemented but is
	 * anything else ever going to use it?
	 */

	if (!hwc->sample_period) {
		hwc->sample_period = alpha_pmu->pmc_max_period[0];
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

/*
 * Main entry point to initialise a HW performance event.
 */
static int alpha_pmu_event_init(struct perf_event *event)
{
	int err;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (!alpha_pmu)
		return -ENODEV;

	/* Do the real initialisation work. */
	err = __hw_perf_event_init(event);

	return err;
}

/*
 * Main entry point - enable HW performance counters.
 */
static void alpha_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	if (cpuc->n_events > 0) {
		/* Update cpuc with information from any new scheduled events. */
		maybe_change_configuration(cpuc);

		/* Start counting the desired events. */
		wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE);
		wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
	}
}


/*
 * Main entry point - disable HW performance counters.
 */

static void alpha_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
}

static struct pmu pmu = {
	.pmu_enable	= alpha_pmu_enable,
	.pmu_disable	= alpha_pmu_disable,
	.event_init	= alpha_pmu_event_init,
	.add		= alpha_pmu_add,
	.del		= alpha_pmu_del,
	.start		= alpha_pmu_start,
	.stop		= alpha_pmu_stop,
	.read		= alpha_pmu_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};
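
/*
 * For reference (illustrative, not part of the original source): userspace
 * reaches this PMU through the perf_event_open(2) syscall, e.g.
 *
 *	struct perf_event_attr attr = {
 *		.size   = sizeof(attr),
 *		.type   = PERF_TYPE_HARDWARE,
 *		.config = PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * which lands in alpha_pmu_event_init() via the .event_init callback above.
 */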
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)  * Main entry point - don't know when this is called but it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)  * obviously dumps debug info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) void perf_event_print_debug(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 	unsigned long pcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 	int pcr0, pcr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 	if (!supported_cpu())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 	cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 	pcr = wrperfmon(PERFMON_CMD_READ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 	pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 	pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 	pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 
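/*
 * The decode above slices the single value returned by
 * PERFMON_CMD_READ into one field per counter.  The same arithmetic
 * in isolation; the shift and mask here are illustrative stand-ins,
 * the real EV67 values live in alpha_pmu->pmc_count_shift/_mask:
 */
static unsigned long pcr_field(unsigned long pcr, int shift,
			       unsigned long mask)
{
	return (pcr >> shift) & mask;
}
/* e.g. pcr_field(pcr, 6, 0xfffff) extracts a 20-bit field at bit 6. */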
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)  * Performance Monitoring Interrupt Service Routine called when a PMC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)  * overflows.  The PMC that overflowed is passed in la_ptr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) static void alpha_perf_event_irq_handler(unsigned long la_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 					struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	struct cpu_hw_events *cpuc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 	struct perf_sample_data data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 	struct perf_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 	struct hw_perf_event *hwc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 	int idx, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 	__this_cpu_inc(irq_pmi_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 	cpuc = this_cpu_ptr(&cpu_hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 	/* If a PMC counted all the way through its period and raised a new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 	 * overflow interrupt while we are still in this handler, the result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 	 * would be disastrous!  The EV6 and EV67 counters are large enough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 	 * to prevent this, but disable the PMCs to be really sure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 	/* la_ptr is the counter that overflowed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 	if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 		/* This should never occur! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 		irq_err_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 		pr_warn("PMI: silly index %ld\n", la_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 	idx = la_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 	for (j = 0; j < cpuc->n_events; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 		if (cpuc->current_idx[j] == idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 	if (unlikely(j == cpuc->n_events)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 		/* This can occur if the event was disabled just as the PMC overflowed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 	event = cpuc->event[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 	if (unlikely(!event)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 		/* This should never occur! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 		irq_err_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 		pr_warn("PMI: No event at index %d!\n", idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 	hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 	alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 	perf_sample_data_init(&data, 0, hwc->last_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 	if (alpha_perf_event_set_period(event, hwc, idx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 		if (perf_event_overflow(event, &data, regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 			/* Interrupts coming too quickly; "throttle" the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 			 * counter, i.e., disable it for a little while.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 			alpha_pmu_stop(event, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 	wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) 
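/*
 * Why pmc_max_period[idx] + 1 is passed to the update above: the PMC
 * has wrapped, so the raw value read back is the true count modulo
 * one full period.  A worked sketch of that compensation, assuming an
 * illustrative 20-bit counter (max_period = 0xfffff); the real widths
 * come from alpha_pmu, and overflow_delta() is hypothetical:
 */
static long overflow_delta(long prev_raw, long new_raw, long max_period)
{
	/* new_raw has wrapped below prev_raw; add one period back in. */
	return (new_raw - prev_raw) + (max_period + 1);
}
/* e.g. prev_raw = 0xffff0, new_raw = 0x10 gives 0x20 events counted. */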
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)  * Init call to initialise performance events at kernel startup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) int __init init_hw_perf_events(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) 	pr_info("Performance events: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) 	if (!supported_cpu()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) 		pr_cont("No support for your CPU.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) 	pr_cont("Supported CPU type!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) 	/* Override performance counter IRQ vector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 	perf_irq = alpha_perf_event_irq_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) 	/* And set up PMU specification */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 	alpha_pmu = &ev67_pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) early_initcall(init_hw_perf_events);
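/*
 * Illustrative userspace sanity check, assuming sysfs is mounted:
 * once perf_pmu_register() has run, the "type" attribute of the
 * registered PMU reports the value to place in perf_event_attr.type
 * (here the fixed PERF_TYPE_RAW, since registration passed an
 * explicit type):
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/event_source/devices/cpu/type", "r");
	int type = -1;

	if (f) {
		if (fscanf(f, "%d", &type) != 1)
			type = -1;
		fclose(f);
	}
	printf("cpu PMU type: %d\n", type);
	return type >= 0 ? 0 : 1;
}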