// SPDX-License-Identifier: GPL-2.0
/* Performance event support for sparc64.
 *
 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/stacktrace.h>
#include <asm/cpudata.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/sched/clock.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
#include <asm/cacheflush.h>

#include "kernel.h"
#include "kstack.h"

/* Two classes of sparc64 chips currently exist, both of which have
 * 32-bit counters that can generate overflow interrupts on the
 * transition from 0xffffffff to 0.
 *
 * All chips up to and including SPARC-T3 have two performance
 * counters.  The two 32-bit counters are accessed in one go using a
 * single 64-bit register.
 *
 * On these older chips both counters are controlled using a single
 * control register.  The only way to stop all sampling is to clear
 * all of the context (user, supervisor, hypervisor) sampling enable
 * bits.  But these bits apply to both counters, thus the two counters
 * can't be enabled/disabled individually.
 *
 * Furthermore, the control register on these older chips has two
 * event fields, one for each of the two counters.  It's thus nearly
 * impossible to have one counter going while keeping the other one
 * stopped.  Therefore it is possible to get overflow interrupts for
 * counters not currently "in use", and that condition must be checked
 * in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 *
 * Starting with SPARC-T4 we have one control register per counter.
 * And the counters are stored in individual registers.  The registers
 * for the counters are 64-bit but only a 32-bit counter is
 * implemented.  The event selections on SPARC-T4 lack any
 * restrictions, therefore we can elide all of the complicated
 * conflict resolution code we have for SPARC-T3 and earlier chips.
 */

#define MAX_HWEVENTS		4
#define MAX_PCRS		4
#define MAX_PERIOD		((1UL << 32) - 1)

#define PIC_UPPER_INDEX		0
#define PIC_LOWER_INDEX		1
#define PIC_NO_INDEX		-1

struct cpu_hw_events {
	/* Number of events currently scheduled onto this cpu.
	 * This tells how many entries in the arrays below
	 * are valid.
	 */
	int n_events;

	/* Number of new events added since the last hw_perf_disable().
	 * This works because the perf event layer always adds new
	 * events inside of a perf_{disable,enable}() sequence.
	 */
	int n_added;

	/* Array of events currently scheduled on this cpu. */
	struct perf_event *event[MAX_HWEVENTS];

	/* Array of encoded longs, specifying the %pcr register
	 * encoding and the mask of PIC counters this event can
	 * be scheduled on.  See perf_event_encode() et al.
	 */
	unsigned long events[MAX_HWEVENTS];

	/* The current counter index assigned to an event.  When the
	 * event hasn't been programmed into the cpu yet, this will
	 * hold PIC_NO_INDEX.  The event->hw.idx value tells us where
	 * we ought to schedule the event.
	 */
	int current_idx[MAX_HWEVENTS];

	/* Software copy of %pcr register(s) on this cpu. */
	u64 pcr[MAX_HWEVENTS];

	/* Enabled/disabled state. */
	int enabled;

	unsigned int txn_flags;
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };

/* An event map describes the characteristics of a performance
 * counter event.  In particular it gives the encoding as well as
 * a mask telling which counters the event can be measured on.
 *
 * The mask is unused on SPARC-T4 and later.
 */
struct perf_event_map {
	u16 encoding;
	u8 pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};

/* Encode a perf_event_map entry into a long. */
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}

static u8 perf_event_get_msk(unsigned long val)
{
	return val & 0xff;
}

static u64 perf_event_get_enc(unsigned long val)
{
	return val >> 16;
}
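
/* Illustrative example, not part of the original source: the entry
 * { .encoding = 0x0009, .pic_mask = PIC_UPPER } encodes to the long
 * 0x00090001; perf_event_get_enc() then recovers 0x0009 and
 * perf_event_get_msk() recovers PIC_UPPER (0x01).  Bits 15:8 of the
 * encoded value are unused padding between the two fields.
 */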

#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff

typedef struct perf_event_map cache_map_t
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX];

struct sparc_pmu {
	const struct perf_event_map *(*event_map)(int);
	const cache_map_t *cache_map;
	int max_events;
	u32 (*read_pmc)(int);
	void (*write_pmc)(int, u64);
	int upper_shift;
	int lower_shift;
	int event_mask;
	int user_bit;
	int priv_bit;
	int hv_bit;
	int irq_bit;
	int upper_nop;
	int lower_nop;
	unsigned int flags;
#define SPARC_PMU_ALL_EXCLUDES_SAME	0x00000001
#define SPARC_PMU_HAS_CONFLICTS		0x00000002
	int max_hw_events;
	int num_pcrs;
	int num_pic_regs;
};
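
/* Editorial note: SPARC_PMU_HAS_CONFLICTS marks the pre-T4 chips whose
 * shared control register imposes the scheduling conflicts described in
 * the comment at the top of this file.  SPARC_PMU_ALL_EXCLUDES_SAME
 * marks chips where the user/priv/hv trace bits apply to all counters
 * at once, so every event scheduled on a cpu must agree on its
 * exclude_* settings.
 */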

static u32 sparc_default_read_pmc(int idx)
{
	u64 val;

	val = pcr_ops->read_pic(0);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}

static void sparc_default_write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	pic = pcr_ops->read_pic(0);
	pic &= ~mask;
	pic |= val;
	pcr_ops->write_pic(0, pic);
}
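
/* Editorial note: on the pre-T4 chips both 32-bit counters live in a
 * single 64-bit %pic register, so updating one counter requires the
 * read-modify-write sequence above to preserve the other counter's
 * current value.
 */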

static const struct perf_event_map ultra3_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3_event_map(int event_id)
{
	return &ultra3_perfmon_event_map[event_id];
}

static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu ultra3_pmu = {
	.event_map = ultra3_event_map,
	.cache_map = &ultra3_cache_map,
	.max_events = ARRAY_SIZE(ultra3_perfmon_event_map),
	.read_pmc = sparc_default_read_pmc,
	.write_pmc = sparc_default_write_pmc,
	.upper_shift = 11,
	.lower_shift = 4,
	.event_mask = 0x3f,
	.user_bit = PCR_UTRACE,
	.priv_bit = PCR_STRACE,
	.upper_nop = 0x1c,
	.lower_nop = 0x14,
	.flags = (SPARC_PMU_ALL_EXCLUDES_SAME |
		  SPARC_PMU_HAS_CONFLICTS),
	.max_hw_events = 2,
	.num_pcrs = 1,
	.num_pic_regs = 1,
};

/* Niagara1 is very limited.  The upper PIC is hard-locked to count
 * only instructions, so it is free-running, which creates all kinds
 * of problems.  Some hardware designs make one wonder if the creator
 * even looked at how this stuff gets used by software.
 */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};

static const struct perf_event_map *niagara1_event_map(int event_id)
{
	return &niagara1_perfmon_event_map[event_id];
}

static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
		[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara1_pmu = {
	.event_map = niagara1_event_map,
	.cache_map = &niagara1_cache_map,
	.max_events = ARRAY_SIZE(niagara1_perfmon_event_map),
	.read_pmc = sparc_default_read_pmc,
	.write_pmc = sparc_default_write_pmc,
	.upper_shift = 0,
	.lower_shift = 4,
	.event_mask = 0x7,
	.user_bit = PCR_UTRACE,
	.priv_bit = PCR_STRACE,
	.upper_nop = 0x0,
	.lower_nop = 0x0,
	.flags = (SPARC_PMU_ALL_EXCLUDES_SAME |
		  SPARC_PMU_HAS_CONFLICTS),
	.max_hw_events = 2,
	.num_pcrs = 1,
	.num_pic_regs = 1,
};

static const struct perf_event_map niagara2_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};

static const struct perf_event_map *niagara2_event_map(int event_id)
{
	return &niagara2_perfmon_event_map[event_id];
}

static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara2_pmu = {
	.event_map = niagara2_event_map,
	.cache_map = &niagara2_cache_map,
	.max_events = ARRAY_SIZE(niagara2_perfmon_event_map),
	.read_pmc = sparc_default_read_pmc,
	.write_pmc = sparc_default_write_pmc,
	.upper_shift = 19,
	.lower_shift = 6,
	.event_mask = 0xfff,
	.user_bit = PCR_UTRACE,
	.priv_bit = PCR_STRACE,
	.hv_bit = PCR_N2_HTRACE,
	.irq_bit = 0x30,
	.upper_nop = 0x220,
	.lower_nop = 0x220,
	.flags = (SPARC_PMU_ALL_EXCLUDES_SAME |
		  SPARC_PMU_HAS_CONFLICTS),
	.max_hw_events = 2,
	.num_pcrs = 1,
	.num_pic_regs = 1,
};

static const struct perf_event_map niagara4_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { (26 << 6) },
	[PERF_COUNT_HW_INSTRUCTIONS] = { (3 << 6) | 0x3f },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { (3 << 6) | 0x04 },
	[PERF_COUNT_HW_CACHE_MISSES] = { (16 << 6) | 0x07 },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { (4 << 6) | 0x01 },
	[PERF_COUNT_HW_BRANCH_MISSES] = { (25 << 6) | 0x0f },
};
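
/* Editorial note: judging from the 0x7ff event_mask used by
 * niagara4_pmu below, the encodings above appear to pack an event
 * select value in bits 10:6 and a mask of sub-events in bits 5:0.
 */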

static const struct perf_event_map *niagara4_event_map(int event_id)
{
	return &niagara4_perfmon_event_map[event_id];
}

static const cache_map_t niagara4_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { (3 << 6) | 0x04 },
		[C(RESULT_MISS)] = { (16 << 6) | 0x07 },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { (3 << 6) | 0x08 },
		[C(RESULT_MISS)] = { (16 << 6) | 0x07 },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { (3 << 6) | 0x3f },
		[C(RESULT_MISS)] = { (11 << 6) | 0x03 },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { (3 << 6) | 0x04 },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { (3 << 6) | 0x08 },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { (17 << 6) | 0x3f },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { (6 << 6) | 0x3f },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static u32 sparc_vt_read_pmc(int idx)
{
	u64 val = pcr_ops->read_pic(idx);

	return val & 0xffffffff;
}

static void sparc_vt_write_pmc(int idx, u64 val)
{
	u64 pcr;

	pcr = pcr_ops->read_pcr(idx);
	/* ensure ov and ntc are reset */
	pcr &= ~(PCR_N4_OV | PCR_N4_NTC);

	pcr_ops->write_pic(idx, val & 0xffffffff);

	pcr_ops->write_pcr(idx, pcr);
}
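
/* Editorial note: T4 and later have one %pcr/%pic pair per counter, so
 * idx selects the register directly and no read-modify-write of a
 * shared counter register is needed.  Clearing PCR_N4_OV and PCR_N4_NTC
 * while reprogramming the counter presumably prevents a stale overflow
 * indication from firing again.
 */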
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) static const struct sparc_pmu niagara4_pmu = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) .event_map = niagara4_event_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) .cache_map = &niagara4_cache_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) .max_events = ARRAY_SIZE(niagara4_perfmon_event_map),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) .read_pmc = sparc_vt_read_pmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) .write_pmc = sparc_vt_write_pmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) .upper_shift = 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) .lower_shift = 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) .event_mask = 0x7ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) .user_bit = PCR_N4_UTRACE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) .priv_bit = PCR_N4_STRACE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) /* We explicitly don't support hypervisor tracing. The T4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) * generates the overflow event for precise events via a trap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) * which will not be generated (ie. it's completely lost) if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * we happen to be in the hypervisor when the event triggers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * Essentially, the overflow event reporting is completely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * unusable when you have hypervisor mode tracing enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) .hv_bit = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) .irq_bit = PCR_N4_TOE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) .upper_nop = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) .lower_nop = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) .flags = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) .max_hw_events = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) .num_pcrs = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) .num_pic_regs = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) static const struct sparc_pmu sparc_m7_pmu = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) .event_map = niagara4_event_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) .cache_map = &niagara4_cache_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) .max_events = ARRAY_SIZE(niagara4_perfmon_event_map),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) .read_pmc = sparc_vt_read_pmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) .write_pmc = sparc_vt_write_pmc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) .upper_shift = 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) .lower_shift = 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) .event_mask = 0x7ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) .user_bit = PCR_N4_UTRACE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) .priv_bit = PCR_N4_STRACE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) /* We explicitly don't support hypervisor tracing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) .hv_bit = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) .irq_bit = PCR_N4_TOE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) .upper_nop = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) .lower_nop = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) .flags = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) .max_hw_events = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) .num_pcrs = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) .num_pic_regs = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) };

static const struct sparc_pmu *sparc_pmu __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) static u64 event_encoding(u64 event_id, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (idx == PIC_UPPER_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) event_id <<= sparc_pmu->upper_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) event_id <<= sparc_pmu->lower_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return event_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) static u64 mask_for_index(int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) return event_encoding(sparc_pmu->event_mask, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) static u64 nop_for_index(int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) return event_encoding(idx == PIC_UPPER_INDEX ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) sparc_pmu->upper_nop :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) sparc_pmu->lower_nop, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
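
/* Worked example (shift/mask values from the niagara4 definition
 * above; the 0x2b encoding is hypothetical): with
 * upper_shift == lower_shift == 5 and event_mask == 0x7ff,
 *
 *	mask_for_index(idx)       == 0x7ff << 5 == 0xffe0
 *	event_encoding(0x2b, idx) == 0x2b  << 5 == 0x560
 *
 * so an event's selector bits always land in PCR bits 5..15.
 */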
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) u64 enc, val, mask = mask_for_index(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) int pcr_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (sparc_pmu->num_pcrs > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) pcr_index = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) enc = perf_event_get_enc(cpuc->events[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) val = cpuc->pcr[pcr_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) val &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) val |= event_encoding(enc, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) cpuc->pcr[pcr_index] = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) u64 mask = mask_for_index(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) u64 nop = nop_for_index(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) int pcr_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (sparc_pmu->num_pcrs > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) pcr_index = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) val = cpuc->pcr[pcr_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) val &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) val |= nop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) cpuc->pcr[pcr_index] = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) static u64 sparc_perf_event_update(struct perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) struct hw_perf_event *hwc, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) int shift = 64 - 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) u64 prev_raw_count, new_raw_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) s64 delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) prev_raw_count = local64_read(&hwc->prev_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) new_raw_count = sparc_pmu->read_pmc(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) new_raw_count) != prev_raw_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) delta = (new_raw_count << shift) - (prev_raw_count << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) delta >>= shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) local64_add(delta, &event->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) local64_sub(delta, &hwc->period_left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) return new_raw_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
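
/* Worked example of the shift trick above (illustrative only): the
 * counters are 32 bits wide, so deltas must be computed modulo 2^32.
 * With shift == 32, prev_raw_count == 0xfffffff0 and a new reading of
 * 0x00000010 taken after the counter wrapped:
 *
 *	(0x10 << 32) - (0xfffffff0 << 32) == 0x0000002000000000
 *	 0x0000002000000000 >> 32         == 0x20
 *
 * i.e. 32 events counted, even though the raw value went "backwards".
 */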
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) static int sparc_perf_event_set_period(struct perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) struct hw_perf_event *hwc, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) s64 left = local64_read(&hwc->period_left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) s64 period = hwc->sample_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /* The period may have been changed by PERF_EVENT_IOC_PERIOD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (unlikely(period != hwc->last_period))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) left = period - (hwc->last_period - left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (unlikely(left <= -period)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) left = period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) local64_set(&hwc->period_left, left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) hwc->last_period = period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (unlikely(left <= 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) left += period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) local64_set(&hwc->period_left, left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) hwc->last_period = period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (left > MAX_PERIOD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) left = MAX_PERIOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) local64_set(&hwc->prev_count, (u64)-left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) sparc_pmu->write_pmc(idx, (u64)(-left) & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) perf_event_update_userpage(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
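
/* Illustrative example of the reload value computed above: for a
 * hypothetical sample_period of 0x1000, left == 0x1000 and the PIC is
 * written with (u64)(-0x1000) & 0xffffffff == 0xfffff000, so the
 * 32-bit counter overflows (and raises the interrupt) after exactly
 * 0x1000 events.
 */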
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) static void read_in_all_counters(struct cpu_hw_events *cpuc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) for (i = 0; i < cpuc->n_events; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) struct perf_event *cp = cpuc->event[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (cpuc->current_idx[i] != PIC_NO_INDEX &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) cpuc->current_idx[i] != cp->hw.idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) sparc_perf_event_update(cp, &cp->hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) cpuc->current_idx[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) cpuc->current_idx[i] = PIC_NO_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (cp->hw.state & PERF_HES_STOPPED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) cp->hw.state |= PERF_HES_ARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
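
/* Example: if an event had been counting on the upper counter but the
 * scheduler is about to move it to the lower one, the loop above folds
 * the final upper-counter value into event->count, marks the slot
 * PIC_NO_INDEX so it will be reprogrammed from scratch, and uses
 * PERF_HES_ARCH to remember that a stopped event must not be
 * restarted when the new PCR value is built.
 */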
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /* On this PMU all PICs are programmed using a single PCR. Calculate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * the combined control register value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * For such chips we require that all of the events have the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * configuration, so just fetch the settings from the first entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static void calculate_single_pcr(struct cpu_hw_events *cpuc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (!cpuc->n_added)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) /* Assign to counters all unassigned events. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) for (i = 0; i < cpuc->n_events; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) struct perf_event *cp = cpuc->event[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) struct hw_perf_event *hwc = &cp->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) int idx = hwc->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) u64 enc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (cpuc->current_idx[i] != PIC_NO_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) sparc_perf_event_set_period(cp, hwc, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) cpuc->current_idx[i] = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) enc = perf_event_get_enc(cpuc->events[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) cpuc->pcr[0] &= ~mask_for_index(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (hwc->state & PERF_HES_ARCH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) cpuc->pcr[0] |= nop_for_index(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) cpuc->pcr[0] |= event_encoding(enc, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) hwc->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
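
/* Sketch of the combined value built above (encodings hypothetical):
 * with event A on the upper counter and event B on the lower one,
 * pcr[0] ends up as roughly
 *
 *	(enc_A << upper_shift) | (enc_B << lower_shift) | config_base
 *
 * where config_base carries the user/priv/irq enable bits, which
 * check_excludes() keeps identical across the group on these chips.
 */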
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) static void sparc_pmu_start(struct perf_event *event, int flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
/* On this PMU each PIC has its own PCR control register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (!cpuc->n_added)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) for (i = 0; i < cpuc->n_events; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) struct perf_event *cp = cpuc->event[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) struct hw_perf_event *hwc = &cp->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) int idx = hwc->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (cpuc->current_idx[i] != PIC_NO_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) cpuc->current_idx[i] = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (cp->hw.state & PERF_HES_ARCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) sparc_pmu_start(cp, PERF_EF_RELOAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) for (i = 0; i < cpuc->n_events; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) struct perf_event *cp = cpuc->event[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) int idx = cp->hw.idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) cpuc->pcr[idx] |= cp->hw.config_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) /* If performance event entries have been added, move existing events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * around (if necessary) and then assign new entries to counters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) static void update_pcrs_for_enable(struct cpu_hw_events *cpuc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (cpuc->n_added)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) read_in_all_counters(cpuc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (sparc_pmu->num_pcrs == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) calculate_single_pcr(cpuc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) calculate_multiple_pcrs(cpuc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) static void sparc_pmu_enable(struct pmu *pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (cpuc->enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) cpuc->enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (cpuc->n_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) update_pcrs_for_enable(cpuc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) for (i = 0; i < sparc_pmu->num_pcrs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) pcr_ops->write_pcr(i, cpuc->pcr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) static void sparc_pmu_disable(struct pmu *pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (!cpuc->enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) cpuc->enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) cpuc->n_added = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) for (i = 0; i < sparc_pmu->num_pcrs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) u64 val = cpuc->pcr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) val &= ~(sparc_pmu->user_bit | sparc_pmu->priv_bit |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) sparc_pmu->hv_bit | sparc_pmu->irq_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) cpuc->pcr[i] = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) pcr_ops->write_pcr(i, cpuc->pcr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
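
/* For example, a PCR value of
 *
 *	encodings | user_bit | priv_bit | irq_bit
 *
 * is rewritten here as just "encodings": the event selections stay
 * programmed, but with every trace enable bit clear no privilege
 * level is counted, so the counters stop advancing until
 * sparc_pmu_enable() restores the bits.
 */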
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) static int active_event_index(struct cpu_hw_events *cpuc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) for (i = 0; i < cpuc->n_events; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (cpuc->event[i] == event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) BUG_ON(i == cpuc->n_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return cpuc->current_idx[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) static void sparc_pmu_start(struct perf_event *event, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) int idx = active_event_index(cpuc, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (flags & PERF_EF_RELOAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) sparc_perf_event_set_period(event, &event->hw, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) event->hw.state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) sparc_pmu_enable_event(cpuc, &event->hw, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) perf_event_update_userpage(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) static void sparc_pmu_stop(struct perf_event *event, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) int idx = active_event_index(cpuc, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (!(event->hw.state & PERF_HES_STOPPED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) sparc_pmu_disable_event(cpuc, &event->hw, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) event->hw.state |= PERF_HES_STOPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) sparc_perf_event_update(event, &event->hw, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) event->hw.state |= PERF_HES_UPTODATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) static void sparc_pmu_del(struct perf_event *event, int _flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) for (i = 0; i < cpuc->n_events; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (event == cpuc->event[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /* Absorb the final count and turn off the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) sparc_pmu_stop(event, PERF_EF_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /* Shift remaining entries down into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * the existing slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) while (++i < cpuc->n_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) cpuc->event[i - 1] = cpuc->event[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) cpuc->events[i - 1] = cpuc->events[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) cpuc->current_idx[i - 1] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) cpuc->current_idx[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) perf_event_update_userpage(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) cpuc->n_events--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static void sparc_pmu_read(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) int idx = active_event_index(cpuc, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) struct hw_perf_event *hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) sparc_perf_event_update(event, hwc, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) static atomic_t active_events = ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static DEFINE_MUTEX(pmc_grab_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) static void perf_stop_nmi_watchdog(void *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) stop_nmi_watchdog(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) for (i = 0; i < sparc_pmu->num_pcrs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) cpuc->pcr[i] = pcr_ops->read_pcr(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) static void perf_event_grab_pmc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (atomic_inc_not_zero(&active_events))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) mutex_lock(&pmc_grab_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (atomic_read(&active_events) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (atomic_read(&nmi_active) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) BUG_ON(atomic_read(&nmi_active) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) atomic_inc(&active_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) mutex_unlock(&pmc_grab_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) static void perf_event_release_pmc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (atomic_read(&nmi_active) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) on_each_cpu(start_nmi_watchdog, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) mutex_unlock(&pmc_grab_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
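
/* The grab/release pair above is a simple refcount handoff: the first
 * perf event created stops the NMI watchdog on every CPU (the
 * watchdog and perf share the same counter hardware), and the last
 * event destroyed starts it again.  Creating two events and then
 * destroying both runs perf_stop_nmi_watchdog() once and
 * start_nmi_watchdog() once.
 */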
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) static const struct perf_event_map *sparc_map_cache_event(u64 config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) unsigned int cache_type, cache_op, cache_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) const struct perf_event_map *pmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (!sparc_pmu->cache_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) cache_type = (config >> 0) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) cache_op = (config >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) cache_result = (config >> 16) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (pmap->encoding == CACHE_OP_UNSUPPORTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (pmap->encoding == CACHE_OP_NONSENSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) return pmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
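
/* The config layout decoded above follows the generic perf ABI:
 *
 *	config = (result << 16) | (op << 8) | type
 *
 * For example, an L1D read miss is
 *
 *	(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) |
 *	(PERF_COUNT_HW_CACHE_OP_READ    <<  8) |
 *	 PERF_COUNT_HW_CACHE_L1D
 */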
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) static void hw_perf_event_destroy(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) perf_event_release_pmc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
/* Make sure all events can be scheduled into the hardware at
 * the same time.  This is simplified by the fact that the chips
 * with counter scheduling conflicts only have two counters;
 * conflict-free chips simply take the events in order.
 *
 * As a side effect, the evts[]->hw.idx values will be assigned
 * on success.  These are pending indexes.  When the events are
 * actually programmed into the chip, these values will propagate
 * to the per-cpu cpuc->current_idx[] slots; see the code in
 * update_pcrs_for_enable() for details.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) static int sparc_check_constraints(struct perf_event **evts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) unsigned long *events, int n_ev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) u8 msk0 = 0, msk1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) int idx0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
	/* This case is possible when we are invoked from
	 * pmu::commit_txn() before any events have been added.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (!n_ev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (n_ev > sparc_pmu->max_hw_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (!(sparc_pmu->flags & SPARC_PMU_HAS_CONFLICTS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) for (i = 0; i < n_ev; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) evts[i]->hw.idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) msk0 = perf_event_get_msk(events[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (n_ev == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (msk0 & PIC_LOWER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) idx0 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) goto success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) BUG_ON(n_ev != 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) msk1 = perf_event_get_msk(events[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) /* If both events can go on any counter, OK. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (msk0 == (PIC_UPPER | PIC_LOWER) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) msk1 == (PIC_UPPER | PIC_LOWER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) goto success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) /* If one event is limited to a specific counter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) * and the other can go on both, OK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) msk1 == (PIC_UPPER | PIC_LOWER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (msk0 & PIC_LOWER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) idx0 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) goto success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) msk0 == (PIC_UPPER | PIC_LOWER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (msk1 & PIC_UPPER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) idx0 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) goto success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) /* If the events are fixed to different counters, OK. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (msk0 & PIC_LOWER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) idx0 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) goto success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /* Otherwise, there is a conflict. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) success:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) evts[0]->hw.idx = idx0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (n_ev == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) evts[1]->hw.idx = idx0 ^ 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
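
/* Example of the conflict case above (dual-counter chips only): if
 * both events are fixed to the same counter, say
 * msk0 == msk1 == PIC_UPPER, none of the success cases match and -1
 * is returned, rejecting the group.  Any combination that leaves one
 * event per counter succeeds, with idx0 chosen so that an event
 * limited to PIC_LOWER ends up on the lower counter's index.
 */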
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) int eu = 0, ek = 0, eh = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) struct perf_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) int i, n, first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (!(sparc_pmu->flags & SPARC_PMU_ALL_EXCLUDES_SAME))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) n = n_prev + n_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) if (n <= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) first = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) event = evts[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) if (first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) eu = event->attr.exclude_user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) ek = event->attr.exclude_kernel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) eh = event->attr.exclude_hv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) first = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) } else if (event->attr.exclude_user != eu ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) event->attr.exclude_kernel != ek ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) event->attr.exclude_hv != eh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
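
/* For instance, grouping one event with .exclude_kernel = 1 and one
 * with .exclude_kernel = 0 fails with -EAGAIN on these chips: the
 * user/priv/hv enables live in a single shared PCR, so per-event
 * exclude settings cannot differ within a group.
 */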
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) static int collect_events(struct perf_event *group, int max_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) struct perf_event *evts[], unsigned long *events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) int *current_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) struct perf_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) int n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) if (!is_software_event(group)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (n >= max_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) evts[n] = group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) events[n] = group->hw.event_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) current_idx[n++] = PIC_NO_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) for_each_sibling_event(event, group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (!is_software_event(event) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) event->state != PERF_EVENT_STATE_OFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (n >= max_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) evts[n] = event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) events[n] = event->hw.event_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) current_idx[n++] = PIC_NO_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) return n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) static int sparc_pmu_add(struct perf_event *event, int ef_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) int n0, ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) n0 = cpuc->n_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (n0 >= sparc_pmu->max_hw_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) cpuc->event[n0] = event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) cpuc->events[n0] = event->hw.event_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) cpuc->current_idx[n0] = PIC_NO_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (!(ef_flags & PERF_EF_START))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) event->hw.state |= PERF_HES_ARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
	/*
	 * If a group event scheduling transaction was started,
	 * skip the schedulability test here; it will be performed
	 * at commit time (->commit_txn) as a whole.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) goto nocheck;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (check_excludes(cpuc->event, n0, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) nocheck:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) cpuc->n_events++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) cpuc->n_added++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) static int sparc_pmu_event_init(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) struct perf_event_attr *attr = &event->attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) struct perf_event *evts[MAX_HWEVENTS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) struct hw_perf_event *hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) unsigned long events[MAX_HWEVENTS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) int current_idx_dmy[MAX_HWEVENTS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) const struct perf_event_map *pmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (atomic_read(&nmi_active) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) /* does not support taken branch sampling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (has_branch_stack(event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) switch (attr->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) case PERF_TYPE_HARDWARE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (attr->config >= sparc_pmu->max_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) pmap = sparc_pmu->event_map(attr->config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) case PERF_TYPE_HW_CACHE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) pmap = sparc_map_cache_event(attr->config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (IS_ERR(pmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) return PTR_ERR(pmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) case PERF_TYPE_RAW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) pmap = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) return -ENOENT;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) if (pmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) hwc->event_base = perf_event_encode(pmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) * User gives us "(encoding << 16) | pic_mask" for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) * PERF_TYPE_RAW events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) hwc->event_base = attr->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) /* We save the enable bits in the config_base. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) hwc->config_base = sparc_pmu->irq_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) if (!attr->exclude_user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) hwc->config_base |= sparc_pmu->user_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) if (!attr->exclude_kernel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) hwc->config_base |= sparc_pmu->priv_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if (!attr->exclude_hv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) hwc->config_base |= sparc_pmu->hv_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (event->group_leader != event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) n = collect_events(event->group_leader,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) sparc_pmu->max_hw_events - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) evts, events, current_idx_dmy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (n < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) events[n] = hwc->event_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) evts[n] = event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if (check_excludes(evts, n, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) if (sparc_check_constraints(evts, events, n + 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) hwc->idx = PIC_NO_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) /* Try to do all error checking before this point, as unwinding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * state after grabbing the PMC is difficult.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) perf_event_grab_pmc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) event->destroy = hw_perf_event_destroy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) if (!hwc->sample_period) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) hwc->sample_period = MAX_PERIOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) hwc->last_period = hwc->sample_period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) local64_set(&hwc->period_left, hwc->sample_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
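
/* Example of the PERF_TYPE_RAW layout accepted above (the 0x2b
 * encoding is hypothetical): to count raw event 0x2b on either
 * counter of a dual-counter chip, userspace would pass
 *
 *	attr.type   = PERF_TYPE_RAW;
 *	attr.config = (0x2b << 16) | (PIC_UPPER | PIC_LOWER);
 *
 * i.e. "(encoding << 16) | pic_mask" as described above.
 */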
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
/*
 * Start a group events scheduling transaction.
 * Set the flag so that pmu::add() skips the per-event
 * schedulability test; it will be performed at commit time.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) static void sparc_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) cpuhw->txn_flags = txn_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (txn_flags & ~PERF_PMU_TXN_ADD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) perf_pmu_disable(pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
/*
 * Stop (cancel) a group events scheduling transaction.
 * Clear the flag so that pmu::add() goes back to performing
 * the schedulability test for each event.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) static void sparc_pmu_cancel_txn(struct pmu *pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) unsigned int txn_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) txn_flags = cpuhw->txn_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) cpuhw->txn_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (txn_flags & ~PERF_PMU_TXN_ADD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) perf_pmu_enable(pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
/*
 * Commit a group events scheduling transaction.
 * Perform the group schedulability test as a whole.
 * Returns 0 on success.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) static int sparc_pmu_commit_txn(struct pmu *pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) if (!sparc_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) cpuc->txn_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) n = cpuc->n_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (check_excludes(cpuc->event, 0, n))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) if (sparc_check_constraints(cpuc->event, cpuc->events, n))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) cpuc->txn_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) perf_pmu_enable(pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
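
/* Typical transaction flow for a two-event group (sketch of the calls
 * the perf core makes, not code in this file):
 *
 *	->start_txn(pmu, PERF_PMU_TXN_ADD);
 *	->add(leader, 0);	// schedulability test skipped
 *	->add(sibling, 0);	// likewise
 *	->commit_txn(pmu);	// excludes + constraints checked once
 *
 * If ->commit_txn() fails, the core invokes ->cancel_txn() instead,
 * which just re-enables the PMU; unwinding the partial adds is the
 * core's job.
 */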
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) static struct pmu pmu = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) .pmu_enable = sparc_pmu_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) .pmu_disable = sparc_pmu_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) .event_init = sparc_pmu_event_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) .add = sparc_pmu_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) .del = sparc_pmu_del,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) .start = sparc_pmu_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) .stop = sparc_pmu_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) .read = sparc_pmu_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) .start_txn = sparc_pmu_start_txn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) .cancel_txn = sparc_pmu_cancel_txn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) .commit_txn = sparc_pmu_commit_txn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) void perf_event_print_debug(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) int cpu, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (!sparc_pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) pr_info("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) for (i = 0; i < sparc_pmu->num_pcrs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) pr_info("CPU#%d: PCR%d[%016llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) cpu, i, pcr_ops->read_pcr(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) for (i = 0; i < sparc_pmu->num_pic_regs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) pr_info("CPU#%d: PIC%d[%016llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) cpu, i, pcr_ops->read_pic(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
					    unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	u64 finish_clock;
	u64 start_clock;
	int i;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	start_clock = sched_clock();

	regs = args->regs;

	cpuc = this_cpu_ptr(&cpu_hw_events);

	/* If the PMU has the TOE IRQ enable bits, we need to do a
	 * dummy write to the %pcr to clear the overflow bits and thus
	 * the interrupt.
	 *
	 * Do this before we peek at the counters to determine
	 * overflow so we don't lose any events.
	 */
	if (sparc_pmu->irq_bit &&
	    sparc_pmu->num_pcrs == 1)
		pcr_ops->write_pcr(0, cpuc->pcr[0]);

	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *event = cpuc->event[i];
		int idx = cpuc->current_idx[i];
		struct hw_perf_event *hwc;
		u64 val;

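		/* On chips with a PCR per counter, the dummy write to
		 * clear the overflow/interrupt bits must be done per
		 * event, against that counter's own PCR.
		 */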
		if (sparc_pmu->irq_bit &&
		    sparc_pmu->num_pcrs > 1)
			pcr_ops->write_pcr(idx, cpuc->pcr[idx]);

		hwc = &event->hw;
		val = sparc_perf_event_update(event, hwc, idx);
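		/* Bit 31 still set means the 32-bit counter has not
		 * wrapped, so this event did not cause the interrupt.
		 */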
		if (val & (1ULL << 31))
			continue;

		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!sparc_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			sparc_pmu_stop(event, 0);
	}

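	/* Tell the perf core how long we spent at NMI level so it can
	 * throttle the sampling rate if handling gets too expensive.
	 */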
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call	= perf_event_nmi_handler,
};

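/* Pick the PMU description table matching the cpu type name probed
 * at boot (sparc_pmu_type).
 */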
static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3") ||
	    !strcmp(sparc_pmu_type, "ultra3+") ||
	    !strcmp(sparc_pmu_type, "ultra3i") ||
	    !strcmp(sparc_pmu_type, "ultra4+")) {
		sparc_pmu = &ultra3_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara")) {
		sparc_pmu = &niagara1_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara2") ||
	    !strcmp(sparc_pmu_type, "niagara3")) {
		sparc_pmu = &niagara2_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara4") ||
	    !strcmp(sparc_pmu_type, "niagara5")) {
		sparc_pmu = &niagara4_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "sparc-m7")) {
		sparc_pmu = &sparc_m7_pmu;
		return true;
	}
	return false;
}

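/* Boot-time setup: initialize the PCR access layer, then register
 * the PMU with the perf core and hook the NMI notifier for overflow
 * handling.  An unsupported chip is not an error; perf events simply
 * remain unavailable.
 */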
static int __init init_hw_perf_events(void)
{
	int err;

	pr_info("Performance events: ");

	err = pcr_arch_init();
	if (err || !supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return 0;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
	register_die_notifier(&perf_event_nmi_notifier);

	return 0;
}
pure_initcall(init_hw_perf_events);

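/* Record a kernel-mode callchain by walking the frame pointer chain,
 * starting from the interrupted %i6.  Trap frames embedded in the
 * stack are followed through; the walk stops at a user-mode trap
 * frame, an invalid frame, or the callchain entry limit.
 */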
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph = 0;
#endif

	stack_trace_flush();

	perf_callchain_store(entry, regs->tpc);

	ksp = regs->u_regs[UREG_I6];
	fp = ksp + STACK_BIAS;
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(current_thread_info(), fp))
			break;

		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(current_thread_info(), regs)) {
			if (user_mode(regs))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}
		perf_callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
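		/* A call instruction returns to pc + 8 (call plus its
		 * delay slot).  If that return was hijacked by the
		 * function graph tracer, substitute the original
		 * return address saved on the tracer's return stack.
		 */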
		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
			struct ftrace_ret_stack *ret_stack;
			ret_stack = ftrace_graph_get_ret_stack(current,
							       graph);
			if (ret_stack) {
				pc = ret_stack->ret;
				perf_callchain_store(entry, pc);
				graph++;
			}
		}
#endif
	} while (entry->nr < entry->max_stack);
}

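/* A user frame pointer is plausible only if it is suitably aligned
 * and the whole frame lies within the user address range.
 */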
static inline int
valid_user_frame(const void __user *fp, unsigned long size)
{
	/* addresses should be at least 4-byte aligned */
	if (((unsigned long) fp) & 3)
		return 0;

	return (__range_not_ok(fp, size, TASK_SIZE) == 0);
}

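/* Walk the user stack of a 64-bit process.  Frames live at
 * %fp + STACK_BIAS and are copied in with page faults disabled, so a
 * faulting access simply terminates the walk.
 */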
static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
				   struct pt_regs *regs)
{
	unsigned long ufp;

	ufp = regs->u_regs[UREG_FP] + STACK_BIAS;
	do {
		struct sparc_stackf __user *usf;
		struct sparc_stackf sf;
		unsigned long pc;

		usf = (struct sparc_stackf __user *)ufp;
		if (!valid_user_frame(usf, sizeof(sf)))
			break;

		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp + STACK_BIAS;
		perf_callchain_store(entry, pc);
	} while (entry->nr < entry->max_stack);
}

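/* Walk the user stack of a 32-bit (compat) process.  A 32-bit task
 * may still carry individual 64-bit stack frames; those are detected
 * per frame and parsed with the 64-bit layout, everything else with
 * the 32-bit one.
 */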
static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
				   struct pt_regs *regs)
{
	unsigned long ufp;

	ufp = regs->u_regs[UREG_FP] & 0xffffffffUL;
	do {
		unsigned long pc;

		if (thread32_stack_is_64bit(ufp)) {
			struct sparc_stackf __user *usf;
			struct sparc_stackf sf;

			ufp += STACK_BIAS;
			usf = (struct sparc_stackf __user *)ufp;
			if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
				break;
			pc = sf.callers_pc & 0xffffffff;
			ufp = ((unsigned long) sf.fp) & 0xffffffff;
		} else {
			struct sparc_stackf32 __user *usf;
			struct sparc_stackf32 sf;

			usf = (struct sparc_stackf32 __user *)ufp;
			if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
				break;
			pc = sf.callers_pc;
			ufp = (unsigned long)sf.fp;
		}
		perf_callchain_store(entry, pc);
	} while (entry->nr < entry->max_stack);
}

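/* Entry point for user callchain sampling.  The user-space copies in
 * the walkers above can disturb the MMU fault bookkeeping kept in
 * thread_info, so the fault address and code are saved and restored
 * around the walk.  flushw_user() first spills any register windows
 * still held in the CPU out to the user stack, otherwise the
 * in-memory frames would be stale.
 */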
void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	u64 saved_fault_address = current_thread_info()->fault_address;
	u8 saved_fault_code = get_thread_fault_code();

	perf_callchain_store(entry, regs->tpc);

	if (!current->mm)
		return;

	flushw_user();

	pagefault_disable();

	if (test_thread_flag(TIF_32BIT))
		perf_callchain_user_32(entry, regs);
	else
		perf_callchain_user_64(entry, regs);

	pagefault_enable();

	set_thread_fault_code(saved_fault_code);
	current_thread_info()->fault_address = saved_fault_address;
}