/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

#include <asm/intel_ds.h>

/* To enable MSR tracing please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
        EXTRA_REG_NONE          = -1, /* not used */

        EXTRA_REG_RSP_0         = 0,  /* offcore_response_0 */
        EXTRA_REG_RSP_1         = 1,  /* offcore_response_1 */
        EXTRA_REG_LBR           = 2,  /* lbr_select */
        EXTRA_REG_LDLAT         = 3,  /* ld_lat_threshold */
        EXTRA_REG_FE            = 4,  /* fe_* */

        EXTRA_REG_MAX                 /* number of entries needed */
};

struct event_constraint {
        union {
                unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
                u64             idxmsk64;
        };
        u64             code;
        u64             cmask;
        int             weight;
        int             overlap;
        int             flags;
        unsigned int    size;
};

static inline bool constraint_match(struct event_constraint *c, u64 ecode)
{
        return ((ecode & c->cmask) - c->code) <= (u64)c->size;
}
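
/*
 * Note on the range check above: the subtraction is done in u64 arithmetic,
 * so any masked event code below c->code wraps around to a huge value and
 * fails the "<= c->size" test. The single compare is therefore equivalent to:
 *
 *   c->code <= (ecode & c->cmask) && (ecode & c->cmask) <= c->code + c->size
 *
 * For a plain (non-range) constraint, size is 0 and this reduces to an exact
 * match on the masked event code.
 */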

/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT       0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST          0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW      0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_PEBS_LD_HSW      0x0008 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW      0x0010 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL             0x0020 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC          0x0040 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED    0x0080 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT        0x0100 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD      0x0200 /* use PEBS auto-reload */
#define PERF_X86_EVENT_LARGE_PEBS       0x0400 /* use large PEBS */
#define PERF_X86_EVENT_PEBS_VIA_PT      0x0800 /* use PT buffer for PEBS */
#define PERF_X86_EVENT_PAIR             0x1000 /* Large Increment per Cycle */
#define PERF_X86_EVENT_LBR_SELECT       0x2000 /* Save/Restore MSR_LBR_SELECT */
#define PERF_X86_EVENT_TOPDOWN          0x4000 /* Count Topdown slots/metrics events */

static inline bool is_topdown_count(struct perf_event *event)
{
        return event->hw.flags & PERF_X86_EVENT_TOPDOWN;
}

static inline bool is_metric_event(struct perf_event *event)
{
        u64 config = event->attr.config;

        return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) &&
                ((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING) &&
                ((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_MAX);
}

static inline bool is_slots_event(struct perf_event *event)
{
        return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;
}

static inline bool is_topdown_event(struct perf_event *event)
{
        return is_metric_event(event) || is_slots_event(event);
}

struct amd_nb {
        int nb_id;  /* NorthBridge id */
        int refcnt; /* reference count */
        struct perf_event *owners[X86_PMC_IDX_MAX];
        struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define PEBS_COUNTER_MASK       ((1ULL << MAX_PEBS_EVENTS) - 1)
#define PEBS_PMI_AFTER_EACH_RECORD BIT_ULL(60)
#define PEBS_OUTPUT_OFFSET      61
#define PEBS_OUTPUT_MASK        (3ull << PEBS_OUTPUT_OFFSET)
#define PEBS_OUTPUT_PT          (1ull << PEBS_OUTPUT_OFFSET)
#define PEBS_VIA_PT_MASK        (PEBS_OUTPUT_PT | PEBS_PMI_AFTER_EACH_RECORD)
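
/*
 * Layout implied by the masks above (sketch, for orientation only): the low
 * bits enable PEBS for the corresponding general purpose counters (one bit
 * per counter, up to MAX_PEBS_EVENTS), bit 60 requests a PMI after each
 * record, and bits 62:61 select the output destination, with PEBS_OUTPUT_PT
 * routing records into the Intel PT buffer.
 */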

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 * REGS_USER can be handled for events limited to ring 3.
 *
 */
#define LARGE_PEBS_FLAGS \
        (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
        PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
        PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
        PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
        PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
        PERF_SAMPLE_PERIOD)

#define PEBS_GP_REGS                    \
        ((1ULL << PERF_REG_X86_AX)    | \
         (1ULL << PERF_REG_X86_BX)    | \
         (1ULL << PERF_REG_X86_CX)    | \
         (1ULL << PERF_REG_X86_DX)    | \
         (1ULL << PERF_REG_X86_DI)    | \
         (1ULL << PERF_REG_X86_SI)    | \
         (1ULL << PERF_REG_X86_SP)    | \
         (1ULL << PERF_REG_X86_BP)    | \
         (1ULL << PERF_REG_X86_IP)    | \
         (1ULL << PERF_REG_X86_FLAGS) | \
         (1ULL << PERF_REG_X86_R8)    | \
         (1ULL << PERF_REG_X86_R9)    | \
         (1ULL << PERF_REG_X86_R10)   | \
         (1ULL << PERF_REG_X86_R11)   | \
         (1ULL << PERF_REG_X86_R12)   | \
         (1ULL << PERF_REG_X86_R13)   | \
         (1ULL << PERF_REG_X86_R14)   | \
         (1ULL << PERF_REG_X86_R15))

/*
 * Per register state.
 */
struct er_account {
        raw_spinlock_t  lock;   /* per-core: protect structure */
        u64             config; /* extra MSR config */
        u64             reg;    /* extra MSR number */
        atomic_t        ref;    /* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
        struct er_account       regs[EXTRA_REG_MAX];
        int                     refcnt;  /* per-core: #HT threads */
        unsigned                core_id; /* per-core: core id */
};

enum intel_excl_state_type {
        INTEL_EXCL_UNUSED    = 0, /* counter is unused */
        INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
        INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
        enum intel_excl_state_type state[X86_PMC_IDX_MAX];
        bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
        raw_spinlock_t  lock;

        struct intel_excl_states states[2];

        union {
                u16     has_exclusive[2];
                u32     exclusive_present;
        };

        int             refcnt;  /* per-core: #HT threads */
        unsigned        core_id; /* per-core: core id */
};

struct x86_perf_task_context;
#define MAX_LBR_ENTRIES         32

enum {
        LBR_FORMAT_32           = 0x00,
        LBR_FORMAT_LIP          = 0x01,
        LBR_FORMAT_EIP          = 0x02,
        LBR_FORMAT_EIP_FLAGS    = 0x03,
        LBR_FORMAT_EIP_FLAGS2   = 0x04,
        LBR_FORMAT_INFO         = 0x05,
        LBR_FORMAT_TIME         = 0x06,
        LBR_FORMAT_MAX_KNOWN    = LBR_FORMAT_TIME,
};

enum {
        X86_PERF_KFREE_SHARED = 0,
        X86_PERF_KFREE_EXCL   = 1,
        X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
        /*
         * Generic x86 PMC bits
         */
        struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
        unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long           running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int                     enabled;

        int                     n_events; /* the # of events in the below arrays */
        int                     n_added;  /* the # last events in the below arrays;
                                             they've never been enabled yet */
        int                     n_txn;    /* the # last events in the below arrays;
                                             added in the current transaction */
        int                     n_txn_pair;
        int                     n_txn_metric;
        int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
        u64                     tags[X86_PMC_IDX_MAX];

        struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
        struct event_constraint *event_constraint[X86_PMC_IDX_MAX];

        int                     n_excl; /* the number of exclusive events */

        unsigned int            txn_flags;
        int                     is_fake;

        /*
         * Intel DebugStore bits
         */
        struct debug_store      *ds;
        void                    *ds_pebs_vaddr;
        void                    *ds_bts_vaddr;
        u64                     pebs_enabled;
        int                     n_pebs;
        int                     n_large_pebs;
        int                     n_pebs_via_pt;
        int                     pebs_output;

        /* Current superset of the events' hardware configuration */
        u64                     pebs_data_cfg;
        u64                     active_pebs_data_cfg;
        int                     pebs_record_size;

        /*
         * Intel LBR bits
         */
        int                     lbr_users;
        int                     lbr_pebs_users;
        struct perf_branch_stack lbr_stack;
        struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
        union {
                struct er_account *lbr_sel;
                struct er_account *lbr_ctl;
        };
        u64                     br_sel;
        void                    *last_task_ctx;
        int                     last_log_id;
        int                     lbr_select;
        void                    *lbr_xsave;

        /*
         * Intel host/guest exclude bits
         */
        u64                     intel_ctrl_guest_mask;
        u64                     intel_ctrl_host_mask;
        struct perf_guest_switch_msr guest_switch_msrs[X86_PMC_IDX_MAX];

        /*
         * Intel checkpoint mask
         */
        u64                     intel_cp_status;

        /*
         * manage shared (per-core, per-cpu) registers
         * used on Intel NHM/WSM/SNB
         */
        struct intel_shared_regs        *shared_regs;
        /*
         * manage exclusive counter access between hyperthreads
         */
        struct event_constraint *constraint_list; /* in enable order */
        struct intel_excl_cntrs *excl_cntrs;
        int                     excl_thread_id; /* 0 or 1 */

        /*
         * SKL TSX_FORCE_ABORT shadow
         */
        u64                     tfa_shadow;

        /*
         * Perf Metrics
         */
        /* number of accepted metrics events */
        int                     n_metric;

        /*
         * AMD specific bits
         */
        struct amd_nb           *amd_nb;
        /* Inverted mask of bits to clear in the perf_ctr ctrl registers */
        u64                     perf_ctr_virt_mask;
        int                     n_pair; /* Large increment events */

        void                    *kfree_on_online[X86_PERF_KFREE_MAX];

        struct pmu              *pmu;
};

#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) { \
        { .idxmsk64 = (n) },            \
        .code = (c),                    \
        .size = (e) - (c),              \
        .cmask = (m),                   \
        .weight = (w),                  \
        .overlap = (o),                 \
        .flags = f,                     \
}

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
        __EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)

#define EVENT_CONSTRAINT(c, n, m) \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

/*
 * The constraint_match() function only works for 'simple' event codes
 * and not for extended (AMD64_EVENTSEL_EVENT) event codes.
 */
#define EVENT_CONSTRAINT_RANGE(c, e, n, m)                      \
        __EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)  \
        __EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
                           0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!), which can
 * dramatically increase scheduling cycles on an over-committed system.
 * The number of such EVENT_CONSTRAINT_OVERLAP() macros and their
 * counter masks must therefore be kept to a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)       \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)    \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on a range of Event codes
 */
#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)                   \
        EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * Filter mask used to validate fixed counter events.
 * The following filters disqualify an event from running on a fixed counter:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)    \
        EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
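
/*
 * Example (illustrative, not taken from this header): the architectural
 * INST_RETIRED.ANY event (0x00c0) pinned to fixed counter 0, which the
 * scheduler sees as PMC index 32 + 0:
 *
 *   FIXED_EVENT_CONSTRAINT(0x00c0, 0)
 *
 * i.e. idxmsk64 has only bit 32 set, and the FIXED_EVENT_FLAGS cmask makes
 * the match sensitive to the disqualifying filter bits listed above.
 */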

/*
 * The special metric counters do not actually exist. They are calculated from
 * the combination of fixed counter 3 (FxCtr3) and MSR_PERF_METRICS.
 *
 * The special metric counters are mapped to a dummy offset for the scheduler.
 * Sharing the same metric between multiple users without multiplexing is not
 * allowed, even though the hardware supports that in principle.
 */

#define METRIC_EVENT_CONSTRAINT(c, n)                                   \
        EVENT_CONSTRAINT(c, (1ULL << (INTEL_PMC_IDX_METRIC_BASE + n)),  \
                         INTEL_ARCH_EVENT_MASK)

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)   \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
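
/*
 * Illustrative difference between the two (event codes here are examples
 * only): INTEL_EVENT_CONSTRAINT(0xc0, 0xf) constrains every umask of event
 * 0xc0 to counters 0-3, while INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2) constrains
 * only the specific event 0xc0 / umask 0x01 encoding to counter 1, because
 * its cmask also covers the umask bits.
 */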

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)       \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)     \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)         \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)      \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)      \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)                     \
        EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)     \
        EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                           ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                           ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
        __EVENT_CONSTRAINT_RANGE(code, end, n,          \
                           ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                           ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0,               \
                           PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                           INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                           INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0,               \
                           PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                           INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                           INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0,               \
                           PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                           INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)


/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c) \
        for ((e) = (c); (e)->weight != -1; (e)++)
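
/*
 * A constraints table is a plain array terminated by EVENT_CONSTRAINT_END
 * and walked with for_each_event_constraint(). A minimal sketch (event codes
 * and counter masks are made up for illustration):
 *
 *   static struct event_constraint example_event_constraints[] = {
 *           FIXED_EVENT_CONSTRAINT(0x00c0, 0),
 *           INTEL_EVENT_CONSTRAINT(0x48, 0x4),
 *           EVENT_CONSTRAINT_END
 *   };
 *
 *   struct event_constraint *c;
 *
 *   for_each_event_constraint(c, example_event_constraints) {
 *           if (constraint_match(c, event->hw.config))
 *                   return c;
 *   }
 */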

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMU of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_event.
 */
struct extra_reg {
        unsigned int            event;
        unsigned int            msr;
        u64                     config_mask;
        u64                     valid_mask;
        int                     idx;  /* per_xxx->regs[] reg index */
        bool                    extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {      \
        .event = (e),                   \
        .msr = (ms),                    \
        .config_mask = (m),             \
        .valid_mask = (vm),             \
        .idx = EXTRA_REG_##i,           \
        .extra_msr_access = true,       \
        }

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)      \
        EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx)     \
        EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
                        ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
        INTEL_UEVENT_EXTRA_REG(c, \
                               MSR_PEBS_LD_LAT_THRESHOLD, \
                               0xffff, \
                               LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
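
/*
 * Extra-reg tables follow the same pattern: an array terminated by
 * EVENT_EXTRA_END. A sketch (the 0x01cd encoding is shown purely for
 * illustration):
 *
 *   static struct extra_reg example_extra_regs[] = {
 *           INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 *           EVENT_EXTRA_END
 *   };
 *
 * The LDLAT entry routes the load latency threshold for that event into
 * MSR_PEBS_LD_LAT_THRESHOLD, with a 16-bit valid mask.
 */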

union perf_capabilities {
        struct {
                u64     lbr_format:6;
                u64     pebs_trap:1;
                u64     pebs_arch_reg:1;
                u64     pebs_format:4;
                u64     smm_freeze:1;
                /*
                 * PMU supports a separate counter range for writing
                 * values wider than 32 bits.
                 */
                u64     full_width_write:1;
                u64     pebs_baseline:1;
                u64     perf_metrics:1;
                u64     pebs_output_pt_available:1;
                u64     anythread_deprecated:1;
        };
        u64     capabilities;
};
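
/*
 * This union mirrors the bit layout of the IA32_PERF_CAPABILITIES MSR; the
 * PMU init code reads that MSR once and caches it in x86_pmu.intel_cap
 * (declared below).
 */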

struct x86_pmu_quirk {
        struct x86_pmu_quirk *next;
        void (*func)(void);
};

union x86_pmu_config {
        struct {
                u64 event:8,
                    umask:8,
                    usr:1,
                    os:1,
                    edge:1,
                    pc:1,
                    interrupt:1,
                    __reserved1:1,
                    en:1,
                    inv:1,
                    cmask:8,
                    event2:4,
                    __reserved2:4,
                    go:1,
                    ho:1;
        } bits;
        u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
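
/*
 * X86_CONFIG() builds a raw PERFEVTSEL value via designated initializers.
 * For example, a "retired instructions with inverted cmask 16" encoding
 * (values shown for illustration only):
 *
 *   u64 config = X86_CONFIG(.event = 0xc0, .inv = 1, .cmask = 16);
 */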

enum {
        x86_lbr_exclusive_lbr,
        x86_lbr_exclusive_bts,
        x86_lbr_exclusive_pt,
        x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
        /*
         * Generic x86 PMC bits
         */
        const char      *name;
        int             version;
        int             (*handle_irq)(struct pt_regs *);
        void            (*disable_all)(void);
        void            (*enable_all)(int added);
        void            (*enable)(struct perf_event *);
        void            (*disable)(struct perf_event *);
        void            (*add)(struct perf_event *);
        void            (*del)(struct perf_event *);
        void            (*read)(struct perf_event *event);
        int             (*hw_config)(struct perf_event *event);
        int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
        unsigned        eventsel;
        unsigned        perfctr;
        int             (*addr_offset)(int index, bool eventsel);
        int             (*rdpmc_index)(int index);
        u64             (*event_map)(int);
        int             max_events;
        int             num_counters;
        int             num_counters_fixed;
        int             cntval_bits;
        u64             cntval_mask;
        union {
                unsigned long events_maskl;
                unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
        };
        int             events_mask_len;
        int             apic;
        u64             max_period;
        struct event_constraint *
                        (*get_event_constraints)(struct cpu_hw_events *cpuc,
                                                 int idx,
                                                 struct perf_event *event);

        void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                                 struct perf_event *event);

        void            (*start_scheduling)(struct cpu_hw_events *cpuc);

        void            (*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

        void            (*stop_scheduling)(struct cpu_hw_events *cpuc);

        struct event_constraint *event_constraints;
        struct x86_pmu_quirk *quirks;
        int             perfctr_second_write;
        u64             (*limit_period)(struct perf_event *event, u64 l);

        /* PMI handler bits */
        unsigned int    late_ack                :1,
                        enabled_ack             :1,
                        counter_freezing        :1;
        /*
         * sysfs attrs
         */
        int             attr_rdpmc_broken;
        int             attr_rdpmc;
        struct attribute **format_attrs;

        ssize_t         (*events_sysfs_show)(char *page, u64 config);
        const struct attribute_group **attr_update;

        unsigned long   attr_freeze_on_smi;

        /*
         * CPU Hotplug hooks
         */
        int             (*cpu_prepare)(int cpu);
        void            (*cpu_starting)(int cpu);
        void            (*cpu_dying)(int cpu);
        void            (*cpu_dead)(int cpu);

        void            (*check_microcode)(void);
        void            (*sched_task)(struct perf_event_context *ctx,
                                      bool sched_in);

        /*
         * Intel Arch Perfmon v2+
         */
        u64             intel_ctrl;
        union perf_capabilities intel_cap;

        /*
         * Intel DebugStore bits
         */
        unsigned int    bts                     :1,
                        bts_active              :1,
                        pebs                    :1,
                        pebs_active             :1,
                        pebs_broken             :1,
                        pebs_prec_dist          :1,
                        pebs_no_tlb             :1,
                        pebs_no_isolation       :1;
        int             pebs_record_size;
        int             pebs_buffer_size;
        int             max_pebs_events;
        void            (*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data);
        struct event_constraint *pebs_constraints;
        void            (*pebs_aliases)(struct perf_event *event);
        unsigned long   large_pebs_flags;
        u64             rtm_abort_event;

        /*
         * Intel LBR
         */
        unsigned int    lbr_tos, lbr_from, lbr_to,
                        lbr_info, lbr_nr;       /* LBR base regs and size */
        union {
                u64     lbr_sel_mask;           /* LBR_SELECT valid bits */
                u64     lbr_ctl_mask;           /* LBR_CTL valid bits */
        };
        union {
                const int *lbr_sel_map;         /* lbr_select mappings */
                int     *lbr_ctl_map;           /* LBR_CTL mappings */
        };
        bool            lbr_double_abort;       /* duplicated lbr aborts */
        bool            lbr_pt_coexist;         /* (LBR|BTS) may coexist with PT */

        /*
         * Intel Architectural LBR CPUID Enumeration
         */
        unsigned int    lbr_depth_mask:8;
        unsigned int    lbr_deep_c_reset:1;
        unsigned int    lbr_lip:1;
        unsigned int    lbr_cpl:1;
        unsigned int    lbr_filter:1;
        unsigned int    lbr_call_stack:1;
        unsigned int    lbr_mispred:1;
        unsigned int    lbr_timed_lbr:1;
        unsigned int    lbr_br_type:1;

        void            (*lbr_reset)(void);
        void            (*lbr_read)(struct cpu_hw_events *cpuc);
        void            (*lbr_save)(void *ctx);
        void            (*lbr_restore)(void *ctx);

        /*
         * Intel PT/LBR/BTS are exclusive
         */
        atomic_t        lbr_exclusive[x86_lbr_exclusive_max];

        /*
         * Intel perf metrics
         */
        u64             (*update_topdown_event)(struct perf_event *event);
        int             (*set_topdown_event_period)(struct perf_event *event);

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * perf task context (i.e. struct perf_event_context::task_ctx_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * switch helper to bridge calls from perf/core to perf/x86.
	 * See struct pmu::swap_task_ctx() usage for examples.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) void (*swap_task_ctx)(struct perf_event_context *prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) struct perf_event_context *next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * AMD bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) unsigned int amd_nb_constraints : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) u64 perf_ctr_pair_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * Extra registers for events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct extra_reg *extra_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) unsigned int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * Intel host/guest support (KVM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) int (*check_period) (struct perf_event *event, u64 period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) int (*aux_output_match) (struct perf_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) struct x86_perf_task_context_opt {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) int lbr_callstack_users;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) int lbr_stack_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) int log_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct x86_perf_task_context {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) u64 lbr_sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) int tos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) int valid_lbrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct x86_perf_task_context_opt opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) struct lbr_entry lbr[MAX_LBR_ENTRIES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) struct x86_perf_task_context_arch_lbr {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct x86_perf_task_context_opt opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) struct lbr_entry entries[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) };
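
/*
 * Illustrative sizing sketch for the flexible entries[] array above; this is
 * not lifted from the allocation code, it merely assumes x86_pmu.lbr_nr holds
 * the enumerated LBR depth:
 *
 *	size_t sz = sizeof(struct x86_perf_task_context_arch_lbr) +
 *		    x86_pmu.lbr_nr * sizeof(struct lbr_entry);
 */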
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * Add padding to guarantee the 64-byte alignment of the state buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * The structure is dynamically allocated. The size of the LBR state may vary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * based on the number of LBR registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * Do not put anything after the LBR state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) struct x86_perf_task_context_arch_lbr_xsave {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) struct x86_perf_task_context_opt opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) struct xregs_state xsave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) struct fxregs_state i387;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) struct xstate_header header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) struct arch_lbr_state lbr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) } __attribute__ ((packed, aligned (XSAVE_ALIGNMENT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) #define x86_add_quirk(func_) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) static struct x86_pmu_quirk __quirk __initdata = { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) .func = func_, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) __quirk.next = x86_pmu.quirks; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) x86_pmu.quirks = &__quirk; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) } while (0)
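
/*
 * Hedged usage sketch for x86_add_quirk(); my_model_quirk is a hypothetical
 * name, assuming quirk functions take no arguments and run once during PMU
 * init:
 *
 *	static __init void my_model_quirk(void)
 *	{
 *		...	adjust x86_pmu fields for a model-specific erratum
 *	}
 *
 *	x86_add_quirk(my_model_quirk);
 */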
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * x86_pmu flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) #define PMU_FL_NO_HT_SHARING 0x1 /* no hyper-threading resource sharing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) #define PMU_FL_HAS_RSP_1 0x2 /* has 2 equivalent offcore_rsp regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) #define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) #define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) #define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) #define PMU_FL_TFA 0x20 /* deal with TSX force abort */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) #define PMU_FL_PAIR 0x40 /* merge counters for large incr. events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) #define EVENT_VAR(_id) event_attr_##_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) #define EVENT_ATTR(_name, _id) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) static struct perf_pmu_events_attr EVENT_VAR(_id) = { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) .id = PERF_COUNT_HW_##_id, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) .event_str = NULL, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) #define EVENT_ATTR_STR(_name, v, str) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) static struct perf_pmu_events_attr event_attr_##v = { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) .id = 0, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) .event_str = str, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) #define EVENT_ATTR_STR_HT(_name, v, noht, ht) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) static struct perf_pmu_events_ht_attr event_attr_##v = { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) .attr = __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) .id = 0, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) .event_str_noht = noht, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) .event_str_ht = ht, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
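
/*
 * Hedged usage sketch for the attribute helpers above; the event name, the
 * identifier and the encoding string are illustrative only:
 *
 *	EVENT_ATTR_STR(demo-loads, demo_loads, "event=0xcd,umask=0x1");
 *
 *	static struct attribute *demo_events_attrs[] = {
 *		EVENT_PTR(demo_loads),
 *		NULL,
 *	};
 */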
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) struct pmu *x86_get_pmu(unsigned int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) extern struct x86_pmu x86_pmu __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (static_cpu_has(X86_FEATURE_ARCH_LBR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return &((struct x86_perf_task_context_arch_lbr *)ctx)->opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) return &((struct x86_perf_task_context *)ctx)->opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
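
/*
 * Hedged example of the helper above: callers hand in the opaque
 * task_ctx_data pointer and touch only the common fields, e.g.:
 *
 *	task_context_opt(ctx_data)->lbr_callstack_users++;
 */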
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) static inline bool x86_pmu_has_lbr_callstack(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) return x86_pmu.lbr_sel_map &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) int x86_perf_event_set_period(struct perf_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
/*
 * Generalized hw caching related hw_event table, filled in on a
 * per-model basis. A value of 0 means 'not supported', -1 means
 * 'hw_event makes no sense on this CPU', and any other value is
 * the raw hw_event ID.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) #define C(x) PERF_COUNT_HW_CACHE_##x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) extern u64 __read_mostly hw_cache_event_ids
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) [PERF_COUNT_HW_CACHE_MAX]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) [PERF_COUNT_HW_CACHE_OP_MAX]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) [PERF_COUNT_HW_CACHE_RESULT_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) extern u64 __read_mostly hw_cache_extra_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) [PERF_COUNT_HW_CACHE_MAX]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) [PERF_COUNT_HW_CACHE_OP_MAX]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) [PERF_COUNT_HW_CACHE_RESULT_MAX];
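
/*
 * Illustrative lookup using the C() shorthand above, e.g. an L1D read miss;
 * a zero result means the generic event is not supported on this model:
 *
 *	u64 config = hw_cache_event_ids[C(L1D)][C(OP_READ)][C(RESULT_MISS)];
 */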
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) u64 x86_perf_event_update(struct perf_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) static inline unsigned int x86_pmu_config_addr(int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return x86_pmu.eventsel + (x86_pmu.addr_offset ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) x86_pmu.addr_offset(index, true) : index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) static inline unsigned int x86_pmu_event_addr(int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return x86_pmu.perfctr + (x86_pmu.addr_offset ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) x86_pmu.addr_offset(index, false) : index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) static inline int x86_pmu_rdpmc_index(int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
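
/*
 * Hedged sketch: a counter read in RDPMC terms typically goes through the
 * translation above, roughly (rdpmcl() as provided by asm/msr.h):
 *
 *	u64 count;
 *	rdpmcl(x86_pmu_rdpmc_index(hwc->idx), count);
 */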
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) int x86_add_exclusive(unsigned int what);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) void x86_del_exclusive(unsigned int what);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) int x86_reserve_hardware(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) void x86_release_hardware(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) int x86_pmu_max_precise(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) void hw_perf_lbr_event_destroy(struct perf_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) int x86_setup_perfctr(struct perf_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) int x86_pmu_hw_config(struct perf_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) void x86_pmu_disable_all(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) static inline bool is_counter_pair(struct hw_perf_event *hwc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) return hwc->flags & PERF_X86_EVENT_PAIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) u64 enable_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (hwc->extra_reg.reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /*
	 * Enable the Merge event on the next counter if a large
	 * increment (paired) event is being enabled on this counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (is_counter_pair(hwc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) void x86_pmu_enable_all(int added);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) int perf_assign_events(struct event_constraint **constraints, int n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) int wmin, int wmax, int gpmax, int *assign);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) void x86_pmu_stop(struct perf_event *event, int flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) static inline void x86_pmu_disable_event(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) struct hw_perf_event *hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) wrmsrl(hwc->config_base, hwc->config & ~disable_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (is_counter_pair(hwc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) void x86_pmu_enable_event(struct perf_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) int x86_pmu_handle_irq(struct pt_regs *regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) extern struct event_constraint emptyconstraint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) extern struct event_constraint unconstrained;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) static inline bool kernel_ip(unsigned long ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) return ip > PAGE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return (long)ip < 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * Not all PMUs provide the right context information to place the reported IP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * into full context. Specifically segment registers are typically not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * supplied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * Assuming the address is a linear address (it is for IBS), we fake the CS and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * vm86 mode using the known zero-based code segment and 'fix up' the registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * to reflect this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) *
 * Intel PEBS/LBR appear to typically provide the effective address; there is
 * not much we can do about that but pray and treat it like a linear address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (regs->flags & X86_VM_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) regs->ip = ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) ssize_t intel_event_sysfs_show(char *page, u64 config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) char *page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) char *page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) #ifdef CONFIG_CPU_SUP_AMD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) int amd_pmu_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) #else /* CONFIG_CPU_SUP_AMD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) static inline int amd_pmu_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) #endif /* CONFIG_CPU_SUP_AMD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) static inline int is_pebs_pt(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) #ifdef CONFIG_CPU_SUP_INTEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) struct hw_perf_event *hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) unsigned int hw_event, bts_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (event->attr.freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return hw_event == bts_event && period == 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) static inline bool intel_pmu_has_bts(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) struct hw_perf_event *hwc = &event->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return intel_pmu_has_bts_period(event, hwc->sample_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) int intel_pmu_save_and_restart(struct perf_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) struct event_constraint *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) struct perf_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) int intel_pmu_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) void init_debug_store_on_cpu(int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) void fini_debug_store_on_cpu(int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) void release_ds_buffers(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) void reserve_ds_buffers(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) void release_lbr_buffers(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) void reserve_lbr_buffers(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) extern struct event_constraint bts_constraint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) extern struct event_constraint vlbr_constraint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) void intel_pmu_enable_bts(u64 config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) void intel_pmu_disable_bts(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) int intel_pmu_drain_bts_buffer(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) extern struct event_constraint intel_core2_pebs_event_constraints[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) extern struct event_constraint intel_atom_pebs_event_constraints[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) extern struct event_constraint intel_slm_pebs_event_constraints[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) extern struct event_constraint intel_glm_pebs_event_constraints[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) extern struct event_constraint intel_glp_pebs_event_constraints[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) extern struct event_constraint intel_nehalem_pebs_event_constraints[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) extern struct event_constraint intel_westmere_pebs_event_constraints[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) extern struct event_constraint intel_snb_pebs_event_constraints[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) extern struct event_constraint intel_ivb_pebs_event_constraints[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) extern struct event_constraint intel_hsw_pebs_event_constraints[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) extern struct event_constraint intel_bdw_pebs_event_constraints[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) extern struct event_constraint intel_skl_pebs_event_constraints[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) extern struct event_constraint intel_icl_pebs_event_constraints[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) struct event_constraint *intel_pebs_constraints(struct perf_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) void intel_pmu_pebs_add(struct perf_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) void intel_pmu_pebs_del(struct perf_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) void intel_pmu_pebs_enable(struct perf_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) void intel_pmu_pebs_disable(struct perf_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) void intel_pmu_pebs_enable_all(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) void intel_pmu_pebs_disable_all(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) void intel_pmu_auto_reload_read(struct perf_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) void intel_ds_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) struct perf_event_context *next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) u64 lbr_from_signext_quirk_wr(u64 val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) void intel_pmu_lbr_reset(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) void intel_pmu_lbr_reset_32(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) void intel_pmu_lbr_reset_64(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) void intel_pmu_lbr_add(struct perf_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) void intel_pmu_lbr_del(struct perf_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) void intel_pmu_lbr_enable_all(bool pmi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) void intel_pmu_lbr_disable_all(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) void intel_pmu_lbr_read(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) void intel_pmu_lbr_save(void *ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) void intel_pmu_lbr_restore(void *ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) void intel_pmu_lbr_init_core(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) void intel_pmu_lbr_init_nhm(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) void intel_pmu_lbr_init_atom(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) void intel_pmu_lbr_init_slm(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) void intel_pmu_lbr_init_snb(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) void intel_pmu_lbr_init_hsw(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) void intel_pmu_lbr_init_skl(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) void intel_pmu_lbr_init_knl(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) void intel_pmu_arch_lbr_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) void intel_pmu_pebs_data_source_nhm(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) void intel_pmu_pebs_data_source_skl(bool pmem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) int intel_pmu_setup_lbr_filter(struct perf_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) void intel_pt_interrupt(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) int intel_bts_interrupt(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) void intel_bts_enable_local(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) void intel_bts_disable_local(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) int p4_pmu_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) int p6_pmu_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) int knc_pmu_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) static inline int is_ht_workaround_enabled(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) #else /* CONFIG_CPU_SUP_INTEL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) static inline void reserve_ds_buffers(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) static inline void release_ds_buffers(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static inline void release_lbr_buffers(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) static inline void reserve_lbr_buffers(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) static inline int intel_pmu_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) static inline int is_ht_workaround_enabled(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) #endif /* CONFIG_CPU_SUP_INTEL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) #if ((defined CONFIG_CPU_SUP_CENTAUR) || (defined CONFIG_CPU_SUP_ZHAOXIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) int zhaoxin_pmu_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) static inline int zhaoxin_pmu_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
#endif /* CONFIG_CPU_SUP_CENTAUR or CONFIG_CPU_SUP_ZHAOXIN */