// SPDX-License-Identifier: GPL-2.0-only
/*
 * L220/L310 cache controller support
 *
 * Copyright (C) 2016 ARM Limited
 */
#include <linux/errno.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/hardware/cache-l2x0.h>

#define PMU_NR_COUNTERS 2

static void __iomem *l2x0_base;
static struct pmu *l2x0_pmu;
static cpumask_t pmu_cpu;

static const char *l2x0_name;

static ktime_t l2x0_pmu_poll_period;
static struct hrtimer l2x0_pmu_hrtimer;

/*
 * The L220/PL310 PMU has two equivalent counters, Counter1 and Counter0.
 * Registers controlling these are laid out in pairs, in descending order, i.e.
 * the register for Counter1 comes first, followed by the register for
 * Counter0.
 * We ensure that idx 0 -> Counter0, and idx 1 -> Counter1.
 */
static struct perf_event *events[PMU_NR_COUNTERS];

/* Find an unused counter */
static int l2x0_pmu_find_idx(void)
{
	int i;

	for (i = 0; i < PMU_NR_COUNTERS; i++) {
		if (!events[i])
			return i;
	}

	return -1;
}

/* How many counters are allocated? */
static int l2x0_pmu_num_active_counters(void)
{
	int i, cnt = 0;

	for (i = 0; i < PMU_NR_COUNTERS; i++) {
		if (events[i])
			cnt++;
	}

	return cnt;
}

static void l2x0_pmu_counter_config_write(int idx, u32 val)
{
	writel_relaxed(val, l2x0_base + L2X0_EVENT_CNT0_CFG - 4 * idx);
}

static u32 l2x0_pmu_counter_read(int idx)
{
	return readl_relaxed(l2x0_base + L2X0_EVENT_CNT0_VAL - 4 * idx);
}

static void l2x0_pmu_counter_write(int idx, u32 val)
{
	writel_relaxed(val, l2x0_base + L2X0_EVENT_CNT0_VAL - 4 * idx);
}

static void __l2x0_pmu_enable(void)
{
	u32 val = readl_relaxed(l2x0_base + L2X0_EVENT_CNT_CTRL);
	val |= L2X0_EVENT_CNT_CTRL_ENABLE;
	writel_relaxed(val, l2x0_base + L2X0_EVENT_CNT_CTRL);
}

static void __l2x0_pmu_disable(void)
{
	u32 val = readl_relaxed(l2x0_base + L2X0_EVENT_CNT_CTRL);
	val &= ~L2X0_EVENT_CNT_CTRL_ENABLE;
	writel_relaxed(val, l2x0_base + L2X0_EVENT_CNT_CTRL);
}

static void l2x0_pmu_enable(struct pmu *pmu)
{
	if (l2x0_pmu_num_active_counters() == 0)
		return;

	__l2x0_pmu_enable();
}

static void l2x0_pmu_disable(struct pmu *pmu)
{
	if (l2x0_pmu_num_active_counters() == 0)
		return;

	__l2x0_pmu_disable();
}

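/*
 * A counter that reads back as 0xffffffff has saturated: an unknown
 * number of events were lost.
 */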
static void warn_if_saturated(u32 count)
{
	if (count != 0xffffffff)
		return;

	pr_warn_ratelimited("L2X0 counter saturated. Poll period too long\n");
}

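/*
 * Accumulate the delta since the last read into event->count. The xchg
 * loop ensures that when several contexts race to read the same event,
 * each delta is counted exactly once.
 */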
static void l2x0_pmu_event_read(struct perf_event *event)
{
	struct hw_perf_event *hw = &event->hw;
	u64 prev_count, new_count, mask;

	do {
		prev_count = local64_read(&hw->prev_count);
		new_count = l2x0_pmu_counter_read(hw->idx);
	} while (local64_xchg(&hw->prev_count, new_count) != prev_count);

	mask = GENMASK_ULL(31, 0);
	local64_add((new_count - prev_count) & mask, &event->count);

	warn_if_saturated(new_count);
}

static void l2x0_pmu_event_configure(struct perf_event *event)
{
	struct hw_perf_event *hw = &event->hw;

	/*
	 * The L2X0 counters saturate at 0xffffffff rather than wrapping, so we
	 * will *always* lose some number of events when a counter saturates,
	 * and have no way of detecting how many were lost.
	 *
	 * To minimize the impact of this, we try to maximize the period by
	 * always starting counters at zero. To ensure that group ratios are
	 * representative, we poll periodically to avoid counters saturating.
	 * See l2x0_pmu_poll().
	 */
	local64_set(&hw->prev_count, 0);
	l2x0_pmu_counter_write(hw->idx, 0);
}

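/*
 * Periodically drain the counters and restart them from zero so that they
 * do not saturate between reads. Counting is paused while this happens so
 * that events within a group keep consistent ratios.
 */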
static enum hrtimer_restart l2x0_pmu_poll(struct hrtimer *hrtimer)
{
	unsigned long flags;
	int i;

	local_irq_save(flags);
	__l2x0_pmu_disable();

	for (i = 0; i < PMU_NR_COUNTERS; i++) {
		struct perf_event *event = events[i];

		if (!event)
			continue;

		l2x0_pmu_event_read(event);
		l2x0_pmu_event_configure(event);
	}

	__l2x0_pmu_enable();
	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, l2x0_pmu_poll_period);
	return HRTIMER_RESTART;
}

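/*
 * Program the counter to count the given event source. The counter's
 * overflow interrupt is left disabled; the poll timer is used instead.
 */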
static void __l2x0_pmu_event_enable(int idx, u32 event)
{
	u32 val;

	val = event << L2X0_EVENT_CNT_CFG_SRC_SHIFT;
	val |= L2X0_EVENT_CNT_CFG_INT_DISABLED;
	l2x0_pmu_counter_config_write(idx, val);
}

static void l2x0_pmu_event_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hw = &event->hw;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(hw->state & PERF_HES_UPTODATE));
		l2x0_pmu_event_configure(event);
	}

	hw->state = 0;

	__l2x0_pmu_event_enable(hw->idx, hw->config_base);
}

static void __l2x0_pmu_event_disable(int idx)
{
	u32 val;

	val = L2X0_EVENT_CNT_CFG_SRC_DISABLED << L2X0_EVENT_CNT_CFG_SRC_SHIFT;
	val |= L2X0_EVENT_CNT_CFG_INT_DISABLED;
	l2x0_pmu_counter_config_write(idx, val);
}

static void l2x0_pmu_event_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hw = &event->hw;

	if (WARN_ON_ONCE(event->hw.state & PERF_HES_STOPPED))
		return;

	__l2x0_pmu_event_disable(hw->idx);

	hw->state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_UPDATE) {
		l2x0_pmu_event_read(event);
		hw->state |= PERF_HES_UPTODATE;
	}
}

static int l2x0_pmu_event_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hw = &event->hw;
	int idx = l2x0_pmu_find_idx();

	if (idx == -1)
		return -EAGAIN;

	/*
	 * Pin the timer, so that the overflows are handled by the chosen
	 * event->cpu (this is the same one as presented in "cpumask"
	 * attribute).
	 */
	if (l2x0_pmu_num_active_counters() == 0)
		hrtimer_start(&l2x0_pmu_hrtimer, l2x0_pmu_poll_period,
			      HRTIMER_MODE_REL_PINNED);

	events[idx] = event;
	hw->idx = idx;

	l2x0_pmu_event_configure(event);

	hw->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

	if (flags & PERF_EF_START)
		l2x0_pmu_event_start(event, 0);

	return 0;
}

static void l2x0_pmu_event_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hw = &event->hw;

	l2x0_pmu_event_stop(event, PERF_EF_UPDATE);

	events[hw->idx] = NULL;
	hw->idx = -1;

	if (l2x0_pmu_num_active_counters() == 0)
		hrtimer_cancel(&l2x0_pmu_hrtimer);
}

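/*
 * A group is schedulable as a unit only if it needs no more hardware
 * counters than we have. Software events may be mixed in freely; events
 * from any other hardware PMU make the group invalid.
 */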
static bool l2x0_pmu_group_is_valid(struct perf_event *event)
{
	struct pmu *pmu = event->pmu;
	struct perf_event *leader = event->group_leader;
	struct perf_event *sibling;
	int num_hw = 0;

	if (leader->pmu == pmu)
		num_hw++;
	else if (!is_software_event(leader))
		return false;

	for_each_sibling_event(sibling, leader) {
		if (sibling->pmu == pmu)
			num_hw++;
		else if (!is_software_event(sibling))
			return false;
	}

	return num_hw <= PMU_NR_COUNTERS;
}

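/*
 * The cache controller is a shared (uncore) resource, so sampling and
 * per-task counting make no sense. Events are counted system-wide on a
 * single CPU, which is advertised to userspace via the "cpumask"
 * attribute.
 */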
static int l2x0_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hw = &event->hw;

	if (event->attr.type != l2x0_pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) ||
	    event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (event->attr.config & ~L2X0_EVENT_CNT_CFG_SRC_MASK)
		return -EINVAL;

	hw->config_base = event->attr.config;

	if (!l2x0_pmu_group_is_valid(event))
		return -EINVAL;

	event->cpu = cpumask_first(&pmu_cpu);

	return 0;
}

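/*
 * Event attributes published under the PMU's sysfs "events" directory.
 * Each carries its config encoding and a flag marking events that exist
 * only on the PL310.
 */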
struct l2x0_event_attribute {
	struct device_attribute attr;
	unsigned int config;
	bool pl310_only;
};

#define L2X0_EVENT_ATTR(_name, _config, _pl310_only)				\
	(&((struct l2x0_event_attribute[]) {{					\
		.attr = __ATTR(_name, S_IRUGO, l2x0_pmu_event_show, NULL),	\
		.config = _config,						\
		.pl310_only = _pl310_only,					\
	}})[0].attr.attr)

#define L220_PLUS_EVENT_ATTR(_name, _config)					\
	L2X0_EVENT_ATTR(_name, _config, false)

#define PL310_EVENT_ATTR(_name, _config)					\
	L2X0_EVENT_ATTR(_name, _config, true)

static ssize_t l2x0_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct l2x0_event_attribute *lattr;

	lattr = container_of(attr, typeof(*lattr), attr);
	return sysfs_emit(buf, "config=0x%x\n", lattr->config);
}

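/* Hide the PL310-only events when driving the earlier L220. */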
static umode_t l2x0_pmu_event_attr_is_visible(struct kobject *kobj,
					      struct attribute *attr,
					      int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pmu *pmu = dev_get_drvdata(dev);
	struct l2x0_event_attribute *lattr;

	lattr = container_of(attr, typeof(*lattr), attr.attr);

	if (!lattr->pl310_only || strcmp("l2c_310", pmu->name) == 0)
		return attr->mode;

	return 0;
}

static struct attribute *l2x0_pmu_event_attrs[] = {
	L220_PLUS_EVENT_ATTR(co,	0x1),
	L220_PLUS_EVENT_ATTR(drhit,	0x2),
	L220_PLUS_EVENT_ATTR(drreq,	0x3),
	L220_PLUS_EVENT_ATTR(dwhit,	0x4),
	L220_PLUS_EVENT_ATTR(dwreq,	0x5),
	L220_PLUS_EVENT_ATTR(dwtreq,	0x6),
	L220_PLUS_EVENT_ATTR(irhit,	0x7),
	L220_PLUS_EVENT_ATTR(irreq,	0x8),
	L220_PLUS_EVENT_ATTR(wa,	0x9),
	PL310_EVENT_ATTR(ipfalloc,	0xa),
	PL310_EVENT_ATTR(epfhit,	0xb),
	PL310_EVENT_ATTR(epfalloc,	0xc),
	PL310_EVENT_ATTR(srrcvd,	0xd),
	PL310_EVENT_ATTR(srconf,	0xe),
	PL310_EVENT_ATTR(epfrcvd,	0xf),
	NULL
};

static struct attribute_group l2x0_pmu_event_attrs_group = {
	.name = "events",
	.attrs = l2x0_pmu_event_attrs,
	.is_visible = l2x0_pmu_event_attr_is_visible,
};

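/*
 * The events above appear under
 * /sys/bus/event_source/devices/<pmu>/events/ and can be counted
 * system-wide with the perf tool, e.g. on a PL310:
 *
 *   perf stat -a -e l2c_310/drhit/,l2c_310/drreq/ -- sleep 1
 */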
static ssize_t l2x0_pmu_cpumask_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &pmu_cpu);
}

static struct device_attribute l2x0_pmu_cpumask_attr =
	__ATTR(cpumask, S_IRUGO, l2x0_pmu_cpumask_show, NULL);

static struct attribute *l2x0_pmu_cpumask_attrs[] = {
	&l2x0_pmu_cpumask_attr.attr,
	NULL,
};

static struct attribute_group l2x0_pmu_cpumask_attr_group = {
	.attrs = l2x0_pmu_cpumask_attrs,
};

static const struct attribute_group *l2x0_pmu_attr_groups[] = {
	&l2x0_pmu_event_attrs_group,
	&l2x0_pmu_cpumask_attr_group,
	NULL,
};

static void l2x0_pmu_reset(void)
{
	int i;

	__l2x0_pmu_disable();

	for (i = 0; i < PMU_NR_COUNTERS; i++)
		__l2x0_pmu_event_disable(i);
}

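/*
 * When the CPU servicing the PMU goes offline, migrate the perf context
 * to another online CPU, if there is one, so counting can continue.
 */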
static int l2x0_pmu_offline_cpu(unsigned int cpu)
{
	unsigned int target;

	if (!cpumask_test_and_clear_cpu(cpu, &pmu_cpu))
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(l2x0_pmu, cpu, target);
	cpumask_set_cpu(target, &pmu_cpu);

	return 0;
}

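/*
 * Counter state may not survive a power-down of the cache controller, so
 * stop the events (saving their counts) before suspend, and reprogram and
 * restart them on resume.
 */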
void l2x0_pmu_suspend(void)
{
	int i;

	if (!l2x0_pmu)
		return;

	l2x0_pmu_disable(l2x0_pmu);

	for (i = 0; i < PMU_NR_COUNTERS; i++) {
		if (events[i])
			l2x0_pmu_event_stop(events[i], PERF_EF_UPDATE);
	}
}

void l2x0_pmu_resume(void)
{
	int i;

	if (!l2x0_pmu)
		return;

	l2x0_pmu_reset();

	for (i = 0; i < PMU_NR_COUNTERS; i++) {
		if (events[i])
			l2x0_pmu_event_start(events[i], PERF_EF_RELOAD);
	}

	l2x0_pmu_enable(l2x0_pmu);
}

void __init l2x0_pmu_register(void __iomem *base, u32 part)
{
	/*
	 * Determine whether we support the PMU, and choose the name for sysfs.
	 * This is also used by l2x0_pmu_event_attr_is_visible to determine
	 * which events to display, as the PL310 PMU supports a superset of
	 * L220 events.
	 *
	 * The L210 PMU has a different programmer's interface, and is not
	 * supported by this driver.
	 *
	 * We must defer registering the PMU until the perf subsystem is up and
	 * running, so just stash the name and base, and leave that to another
	 * initcall.
	 */
	switch (part & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L220:
		l2x0_name = "l2c_220";
		break;
	case L2X0_CACHE_ID_PART_L310:
		l2x0_name = "l2c_310";
		break;
	default:
		return;
	}

	l2x0_base = base;
}

static __init int l2x0_pmu_init(void)
{
	int ret;

	if (!l2x0_base)
		return 0;

	l2x0_pmu = kzalloc(sizeof(*l2x0_pmu), GFP_KERNEL);
	if (!l2x0_pmu) {
		pr_warn("Unable to allocate L2x0 PMU\n");
		return -ENOMEM;
	}

	*l2x0_pmu = (struct pmu) {
		.task_ctx_nr = perf_invalid_context,
		.pmu_enable = l2x0_pmu_enable,
		.pmu_disable = l2x0_pmu_disable,
		.read = l2x0_pmu_event_read,
		.start = l2x0_pmu_event_start,
		.stop = l2x0_pmu_event_stop,
		.add = l2x0_pmu_event_add,
		.del = l2x0_pmu_event_del,
		.event_init = l2x0_pmu_event_init,
		.attr_groups = l2x0_pmu_attr_groups,
		.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
	};

	l2x0_pmu_reset();

	/*
	 * We always use a hrtimer rather than an interrupt.
	 * See comments in l2x0_pmu_event_configure and l2x0_pmu_poll.
	 *
	 * Polling once a second allows the counters to fill up to 1/128th on a
	 * quad-core test chip with cores clocked at 400MHz. Hopefully this
	 * leaves sufficient headroom to avoid overflow on production silicon
	 * at higher frequencies.
	 */
	l2x0_pmu_poll_period = ms_to_ktime(1000);
	hrtimer_init(&l2x0_pmu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	l2x0_pmu_hrtimer.function = l2x0_pmu_poll;

	cpumask_set_cpu(0, &pmu_cpu);
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_L2X0_ONLINE,
					"perf/arm/l2x0:online", NULL,
					l2x0_pmu_offline_cpu);
	if (ret)
		goto out_pmu;

	ret = perf_pmu_register(l2x0_pmu, l2x0_name, -1);
	if (ret)
		goto out_cpuhp;

	return 0;

out_cpuhp:
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_L2X0_ONLINE);
out_pmu:
	kfree(l2x0_pmu);
	l2x0_pmu = NULL;
	return ret;
}
device_initcall(l2x0_pmu_init);