// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common Performance counter support functions for PowerISA v2.07 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
 */
#include "isa207-common.h"

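/*
 * Layout of the raw event code, exposed to userspace through
 * /sys/bus/event_source/devices/<pmu>/format/. Each attribute names a
 * slice of perf_event_attr.config; e.g. "pmc" occupies config bits
 * 16-19. As a rough (unverified) example, "perf stat -e
 * cpu/pmc=1,pmcxsel=0xf2/" should encode the same event as
 * cpu/event=0x100f2/, assuming the core PMU is registered as "cpu".
 */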
PMU_FORMAT_ATTR(event, "config:0-49");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
PMU_FORMAT_ATTR(mark, "config:8");
PMU_FORMAT_ATTR(combine, "config:11");
PMU_FORMAT_ATTR(unit, "config:12-15");
PMU_FORMAT_ATTR(pmc, "config:16-19");
PMU_FORMAT_ATTR(cache_sel, "config:20-23");
PMU_FORMAT_ATTR(sample_mode, "config:24-28");
PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
PMU_FORMAT_ATTR(thresh_start, "config:36-39");
PMU_FORMAT_ATTR(thresh_cmp, "config:40-49");

struct attribute *isa207_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_mark.attr,
	&format_attr_combine.attr,
	&format_attr_unit.attr,
	&format_attr_pmc.attr,
	&format_attr_cache_sel.attr,
	&format_attr_sample_mode.attr,
	&format_attr_thresh_sel.attr,
	&format_attr_thresh_stop.attr,
	&format_attr_thresh_start.attr,
	&format_attr_thresh_cmp.attr,
	NULL,
};

struct attribute_group isa207_pmu_format_group = {
	.name = "format",
	.attrs = isa207_pmu_format_attr,
};

static inline bool event_is_fab_match(u64 event)
{
	/* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
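	/* 0xff0fe keeps pmc (bits 16-19), unit (12-15) and pmcxsel (1-7) */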
	event &= 0xff0fe;

	/* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
	return (event == 0x30056 || event == 0x4f052);
}

static bool is_event_valid(u64 event)
{
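	/*
	 * CPU_FTR_ARCH_31 selects the ISA v3.1 (Power10) event layout and
	 * CPU_FTR_ARCH_300 the ISA v3.0 (Power9) one; everything else
	 * falls back to the ISA v2.07 mask.
	 */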
	u64 valid_mask = EVENT_VALID_MASK;

	if (cpu_has_feature(CPU_FTR_ARCH_31))
		valid_mask = p10_EVENT_VALID_MASK;
	else if (cpu_has_feature(CPU_FTR_ARCH_300))
		valid_mask = p9_EVENT_VALID_MASK;

	return !(event & ~valid_mask);
}

static inline bool is_event_marked(u64 event)
{
	if (event & EVENT_IS_MARKED)
		return true;

	return false;
}

static unsigned long sdar_mod_val(u64 event)
{
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		return p10_SDAR_MODE(event);

	return p9_SDAR_MODE(event);
}

static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
{
	/*
	 * MMCRA[SDAR_MODE] specifies how the SDAR should be updated in
	 * continuous sampling mode.
	 *
	 * In case of Power8:
	 * MMCRA[SDAR_MODE] will be programmed as "0b01" for continuous
	 * sampling mode and will be unchanged when setting MMCRA[63]
	 * (marked events).
	 *
	 * In case of Power9/Power10:
	 * Marked events: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates')
	 * if the event is marked, or if the group already has any marked
	 * events.
	 * For the rest:
	 * MMCRA[SDAR_MODE] will be set from the event code. If the
	 * sdar_mode from the event is zero, default to 0b01. Hardware
	 * requires that we set a non-zero value.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
			*mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
		else if (sdar_mod_val(event))
			*mmcra |= sdar_mod_val(event) << MMCRA_SDAR_MODE_SHIFT;
		else
			*mmcra |= MMCRA_SDAR_MODE_DCACHE;
	} else {
		*mmcra |= MMCRA_SDAR_MODE_TLB;
	}
}

static u64 thresh_cmp_val(u64 value)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return value << p9_MMCRA_THR_CMP_SHIFT;

	return value << MMCRA_THR_CMP_SHIFT;
}

static unsigned long combine_from_event(u64 event)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return p9_EVENT_COMBINE(event);

	return EVENT_COMBINE(event);
}

static unsigned long combine_shift(unsigned long pmc)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return p9_MMCR1_COMBINE_SHIFT(pmc);

	return MMCR1_COMBINE_SHIFT(pmc);
}

static inline bool event_is_threshold(u64 event)
{
	return (event >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
}

static bool is_thresh_cmp_valid(u64 event)
{
	unsigned int cmp, exp;

	/*
	 * Check the mantissa upper two bits are not zero, unless the
	 * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
	 * Power10: thresh_cmp is replaced by l2_l3 event select.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		return false;

	cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
	exp = cmp >> 7;

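	/*
	 * cmp is the 10-bit thresh_cmp field: exponent in bits 7-9,
	 * mantissa in bits 0-6, so 0x60 masks the mantissa's top two bits.
	 */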
	if (exp && (cmp & 0x60) == 0)
		return false;

	return true;
}

static unsigned int dc_ic_rld_quad_l1_sel(u64 event)
{
	unsigned int cache;

	cache = (event >> EVENT_CACHE_SEL_SHIFT) & MMCR1_DC_IC_QUAL_MASK;
	return cache;
}

static inline u64 isa207_find_source(u64 idx, u32 sub_idx)
{
	u64 ret = PERF_MEM_NA;

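	/*
	 * idx is the load/store data-source encoding from SIER[LDST].
	 * P(), PH() and PM() (from isa207-common.h) build
	 * perf_mem_data_src values; PH() marks a hit and PM() a miss at
	 * the given level of the memory hierarchy.
	 */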
	switch (idx) {
	case 0:
		/* Nothing to do */
		break;
	case 1:
		ret = PH(LVL, L1);
		break;
	case 2:
		ret = PH(LVL, L2);
		break;
	case 3:
		ret = PH(LVL, L3);
		break;
	case 4:
		if (sub_idx <= 1)
			ret = PH(LVL, LOC_RAM);
		else if (sub_idx == 2)
			ret = PH(LVL, REM_RAM1);
		else
			ret = PH(LVL, REM_RAM2);
		ret |= P(SNOOP, HIT);
		break;
	case 5:
		ret = PH(LVL, REM_CCE1);
		if ((sub_idx == 0) || (sub_idx == 2) || (sub_idx == 4))
			ret |= P(SNOOP, HIT);
		else if ((sub_idx == 1) || (sub_idx == 3) || (sub_idx == 5))
			ret |= P(SNOOP, HITM);
		break;
	case 6:
		ret = PH(LVL, REM_CCE2);
		if ((sub_idx == 0) || (sub_idx == 2))
			ret |= P(SNOOP, HIT);
		else if ((sub_idx == 1) || (sub_idx == 3))
			ret |= P(SNOOP, HITM);
		break;
	case 7:
		ret = PM(LVL, L1);
		break;
	}

	return ret;
}

void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
			     struct pt_regs *regs)
{
	u64 idx;
	u32 sub_idx;
	u64 sier;
	u64 val;

	/* Skip if no SIER support */
	if (!(flags & PPMU_HAS_SIER)) {
		dsrc->val = 0;
		return;
	}

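	/*
	 * SIER[TYPE] is 1 for a load and 2 for a store; other values
	 * carry no data source for us to decode.
	 */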
	sier = mfspr(SPRN_SIER);
	val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
	if (val == 1 || val == 2) {
		idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
		sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;

		dsrc->val = isa207_find_source(idx, sub_idx);
		dsrc->val |= (val == 1) ? P(OP, LOAD) : P(OP, STORE);
	}
}

void isa207_get_mem_weight(u64 *weight)
{
	u64 mmcra = mfspr(SPRN_MMCRA);
	u64 exp = MMCRA_THR_CTR_EXP(mmcra);
	u64 mantissa = MMCRA_THR_CTR_MANT(mmcra);
	u64 sier = mfspr(SPRN_SIER);
	u64 val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;

	if (cpu_has_feature(CPU_FTR_ARCH_31))
		mantissa = P10_MMCRA_THR_CTR_MANT(mmcra);

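	/*
	 * The threshold event counter decodes as mantissa * 4^exp cycles,
	 * i.e. mantissa << (2 * exp). A SIER[TYPE] of 0 or 7 is treated
	 * as not carrying a valid threshold sample, so report zero.
	 */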
	if (val == 0 || val == 7)
		*weight = 0;
	else
		*weight = mantissa << (2 * exp);
}

int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
{
	unsigned int unit, pmc, cache, ebb;
	unsigned long mask, value;

	mask = value = 0;

	if (!is_event_valid(event))
		return -1;

	pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
	unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		cache = (event >> EVENT_CACHE_SEL_SHIFT) &
			p10_EVENT_CACHE_SEL_MASK;
	else
		cache = (event >> EVENT_CACHE_SEL_SHIFT) &
			EVENT_CACHE_SEL_MASK;
	ebb = (event >> EVENT_EBB_SHIFT) & EVENT_EBB_MASK;

	if (pmc) {
		u64 base_event;

		if (pmc > 6)
			return -1;

		/* Ignore Linux defined bits when checking event below */
		base_event = event & ~EVENT_LINUX_MASK;

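		/*
		 * PMC5 and PMC6 are fixed-function: PMC5 only counts
		 * PM_RUN_INST_CMPL (0x500fa) and PMC6 only PM_RUN_CYC
		 * (0x600f4); reject anything else on them.
		 */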
		if (pmc >= 5 && base_event != 0x500fa &&
		    base_event != 0x600f4)
			return -1;

		mask |= CNST_PMC_MASK(pmc);
		value |= CNST_PMC_VAL(pmc);

		/*
		 * PMC5 and PMC6 are used to count cycles and instructions and
		 * they do not support most of the constraint bits. Add a check
		 * to exclude PMC5/6 from most of the constraints except for
		 * EBB/BHRB.
		 */
		if (pmc >= 5)
			goto ebb_bhrb;
	}

	if (pmc <= 4) {
		/*
		 * Add to number of counters in use. Note this includes events with
		 * a PMC of 0 - they still need a PMC, it's just assigned later.
		 * Don't count events on PMC 5 & 6, there is only one valid event
		 * on each of those counters, and they are handled above.
		 */
		mask |= CNST_NC_MASK;
		value |= CNST_NC_VAL;
	}

	if (unit >= 6 && unit <= 9) {
		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			if (unit == 6) {
				mask |= CNST_L2L3_GROUP_MASK;
				value |= CNST_L2L3_GROUP_VAL(event >> p10_L2L3_EVENT_SHIFT);
			}
		} else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			mask |= CNST_CACHE_GROUP_MASK;
			value |= CNST_CACHE_GROUP_VAL(event & 0xff);

			mask |= CNST_CACHE_PMC4_MASK;
			if (pmc == 4)
				value |= CNST_CACHE_PMC4_VAL;
		} else if (cache & 0x7) {
			/*
			 * L2/L3 events contain a cache selector field, which is
			 * supposed to be programmed into MMCRC. However MMCRC is only
			 * HV writable, and there is no API for guest kernels to modify
			 * it. The solution is for the hypervisor to initialise the
			 * field to zeroes, and for us to only ever allow events that
			 * have a cache selector of zero. The bank selector (bit 3) is
			 * irrelevant, as long as the rest of the value is 0.
			 */
			return -1;
		}

	} else if (cpu_has_feature(CPU_FTR_ARCH_300) || (event & EVENT_IS_L1)) {
		mask |= CNST_L1_QUAL_MASK;
		value |= CNST_L1_QUAL_VAL(cache);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
		mask |= CNST_RADIX_SCOPE_GROUP_MASK;
		value |= CNST_RADIX_SCOPE_GROUP_VAL(event >> p10_EVENT_RADIX_SCOPE_QUAL_SHIFT);
	}

	if (is_event_marked(event)) {
		mask |= CNST_SAMPLE_MASK;
		value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
		if (event_is_threshold(event)) {
			mask |= CNST_THRESH_CTL_SEL_MASK;
			value |= CNST_THRESH_CTL_SEL_VAL(event >> EVENT_THRESH_SHIFT);
		}
	} else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (event_is_threshold(event) && is_thresh_cmp_valid(event)) {
			mask |= CNST_THRESH_MASK;
			value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
		}
	} else {
		/*
		 * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
		 * the threshold control bits are used for the match value.
		 */
		if (event_is_fab_match(event)) {
			mask |= CNST_FAB_MATCH_MASK;
			value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
		} else {
			if (!is_thresh_cmp_valid(event))
				return -1;

			mask |= CNST_THRESH_MASK;
			value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
		}
	}

ebb_bhrb:
	if (!pmc && ebb)
		/* EBB events must specify the PMC */
		return -1;

	if (event & EVENT_WANTS_BHRB) {
		if (!ebb)
			/* Only EBB events can request BHRB */
			return -1;

		mask |= CNST_IFM_MASK;
		value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT);
	}

	/*
	 * All events must agree on EBB, either all request it or none.
	 * EBB events are pinned & exclusive, so this should never actually
	 * hit, but we leave it as a fallback in case.
	 */
	mask |= CNST_EBB_MASK;
	value |= CNST_EBB_VAL(ebb);

	*maskp = mask;
	*valp = value;

	return 0;
}

int isa207_compute_mmcr(u64 event[], int n_ev,
			unsigned int hwc[], struct mmcr_regs *mmcr,
			struct perf_event *pevents[])
{
	unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
	unsigned long mmcr3;
	unsigned int pmc, pmc_inuse;
	int i;

	pmc_inuse = 0;

	/* First pass to count resource use */
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		if (pmc)
			pmc_inuse |= 1 << pmc;
	}

	mmcra = mmcr1 = mmcr2 = mmcr3 = 0;

	/*
	 * Disable BHRB unless explicitly requested by the user, by
	 * setting the MMCRA[BHRBRD] (BHRB recording disable) bit.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		mmcra |= MMCRA_BHRB_DISABLE;

	/* Second pass: assign PMCs, set all MMCR1 fields */
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		unit = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
		combine = combine_from_event(event[i]);
		psel = event[i] & EVENT_PSEL_MASK;

		if (!pmc) {
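			/* No PMC specified: take the first free one of PMC1-4 */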
			for (pmc = 1; pmc <= 4; ++pmc) {
				if (!(pmc_inuse & (1 << pmc)))
					break;
			}

			pmc_inuse |= 1 << pmc;
		}

		if (pmc <= 4) {
			mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
			mmcr1 |= combine << combine_shift(pmc);
			mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
		}

		/* In continuous sampling mode, update SDAR on TLB miss */
		mmcra_sdar_mode(event[i], &mmcra);

		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			cache = dc_ic_rld_quad_l1_sel(event[i]);
			mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
		} else {
			if (event[i] & EVENT_IS_L1) {
				cache = dc_ic_rld_quad_l1_sel(event[i]);
				mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
			}
		}

		/* Set RADIX_SCOPE_QUAL bit */
		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			val = (event[i] >> p10_EVENT_RADIX_SCOPE_QUAL_SHIFT) &
				p10_EVENT_RADIX_SCOPE_QUAL_MASK;
			mmcr1 |= val << p10_MMCR1_RADIX_SCOPE_QUAL_SHIFT;
		}

		if (is_event_marked(event[i])) {
			mmcra |= MMCRA_SAMPLE_ENABLE;

			val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
			if (val) {
				mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT;
				mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
			}
		}

		/*
		 * For PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
		 * the threshold bits are used for the match value.
		 */
		if (!cpu_has_feature(CPU_FTR_ARCH_300) && event_is_fab_match(event[i])) {
			mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
				  EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
		} else {
			val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
			mmcra |= val << MMCRA_THR_CTL_SHIFT;
			val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
			mmcra |= val << MMCRA_THR_SEL_SHIFT;
			if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
				val = (event[i] >> EVENT_THR_CMP_SHIFT) &
					EVENT_THR_CMP_MASK;
				mmcra |= thresh_cmp_val(val);
			}
		}

		if (cpu_has_feature(CPU_FTR_ARCH_31) && (unit == 6)) {
			val = (event[i] >> p10_L2L3_EVENT_SHIFT) &
				p10_EVENT_L2L3_SEL_MASK;
			mmcr2 |= val << p10_L2L3_SEL_SHIFT;
		}

		if (event[i] & EVENT_WANTS_BHRB) {
			val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK;
			mmcra |= val << MMCRA_IFM_SHIFT;
		}

		/* Clear MMCRA[BHRBRD] if the user requested BHRB */
		if (cpu_has_feature(CPU_FTR_ARCH_31) &&
		    (has_branch_stack(pevents[i]) || (event[i] & EVENT_WANTS_BHRB)))
			mmcra &= ~MMCRA_BHRB_DISABLE;

		if (pevents[i]->attr.exclude_user)
			mmcr2 |= MMCR2_FCP(pmc);

		if (pevents[i]->attr.exclude_hv)
			mmcr2 |= MMCR2_FCH(pmc);

		if (pevents[i]->attr.exclude_kernel) {
			if (cpu_has_feature(CPU_FTR_HVMODE))
				mmcr2 |= MMCR2_FCH(pmc);
			else
				mmcr2 |= MMCR2_FCS(pmc);
		}

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			if (pmc <= 4) {
				val = (event[i] >> p10_EVENT_MMCR3_SHIFT) &
					p10_EVENT_MMCR3_MASK;
				mmcr3 |= val << MMCR3_SHIFT(pmc);
			}
		}

		hwc[i] = pmc - 1;
	}

	/* Return MMCRx values */
	mmcr->mmcr0 = 0;

	/* pmc_inuse is 1-based */
	if (pmc_inuse & 2)
		mmcr->mmcr0 = MMCR0_PMC1CE;

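	/* 0x7c covers PMCs 2-6, 0x60 covers PMCs 5-6 (bit n <=> PMC n) */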
	if (pmc_inuse & 0x7c)
		mmcr->mmcr0 |= MMCR0_PMCjCE;

	/* If we're not using PMC 5 or 6, freeze them */
	if (!(pmc_inuse & 0x60))
		mmcr->mmcr0 |= MMCR0_FC56;

	/*
	 * Set MMCR0[PMCCEXT] for Power10, which restricts access to the
	 * group B registers when MMCR0[PMCC] = 0b00.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		mmcr->mmcr0 |= MMCR0_PMCCEXT;

	mmcr->mmcr1 = mmcr1;
	mmcr->mmcra = mmcra;
	mmcr->mmcr2 = mmcr2;
	mmcr->mmcr3 = mmcr3;

	return 0;
}

void isa207_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr)
{
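	/*
	 * pmc is the 0-based counter index (hwc value) here: only PMC1-4,
	 * i.e. pmc 0-3, have MMCR1[PMCSEL] fields to clear. PMC5/6 count
	 * fixed events and have no PMCSEL.
	 */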
	if (pmc <= 3)
		mmcr->mmcr1 &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
}

static int find_alternative(u64 event, const unsigned int ev_alt[][MAX_ALT], int size)
{
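	/*
	 * Each row of ev_alt lists a group of mutually-alternative event
	 * codes. The early break below assumes rows are sorted in
	 * ascending order of their first entry, and that the first entry
	 * is the smallest code in its row.
	 */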
	int i, j;

	for (i = 0; i < size; ++i) {
		if (event < ev_alt[i][0])
			break;

		for (j = 0; j < MAX_ALT && ev_alt[i][j]; ++j)
			if (event == ev_alt[i][j])
				return i;
	}

	return -1;
}

int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags,
			    const unsigned int ev_alt[][MAX_ALT])
{
	int i, j, num_alt = 0;
	u64 alt_event;

	alt[num_alt++] = event;
	i = find_alternative(event, ev_alt, size);
	if (i >= 0) {
		/* Filter out the original event, it's already in alt[0] */
		for (j = 0; j < MAX_ALT; ++j) {
			alt_event = ev_alt[i][j];
			if (alt_event && alt_event != event)
				alt[num_alt++] = alt_event;
		}
	}

	if (flags & PPMU_ONLY_COUNT_RUN) {
		/*
		 * We're only counting in RUN state, so PM_CYC is equivalent
		 * to PM_RUN_CYC and PM_INST_CMPL to PM_RUN_INST_CMPL.
		 */
		j = num_alt;
		for (i = 0; i < num_alt; ++i) {
			switch (alt[i]) {
			case 0x1e:	/* PM_CYC */
				alt[j++] = 0x600f4;	/* PM_RUN_CYC */
				break;
			case 0x600f4:
				alt[j++] = 0x1e;
				break;
			case 0x2:	/* PM_INST_CMPL */
				alt[j++] = 0x500fa;	/* PM_RUN_INST_CMPL */
				break;
			case 0x500fa:
				alt[j++] = 0x2;
				break;
			}
		}
		num_alt = j;
	}

	return num_alt;
}