// SPDX-License-Identifier: GPL-2.0
/**
 * @file nmi_timer_int.c
 *
 * @remark Copyright 2011 Advanced Micro Devices, Inc.
 *
 * @author Robert Richter <robert.richter@amd.com>
 */

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/oprofile.h>
#include <linux/perf_event.h>

#ifdef CONFIG_OPROFILE_NMI_TIMER

static DEFINE_PER_CPU(struct perf_event *, nmi_timer_events);
static int ctr_running;

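/*
 * Pinned, initially disabled CPU-cycles event; nmi_timer_setup() fills in
 * .sample_period so the counter overflows roughly once per timer tick.
 */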
static struct perf_event_attr nmi_timer_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

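/*
 * Overflow handler, invoked from the PMU NMI: clear the interrupt count so
 * the perf core never throttles the event, then feed the sample to oprofile.
 */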
static void nmi_timer_callback(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	event->hw.interrupts = 0; /* don't throttle interrupts */
	oprofile_add_sample(regs, 0);
}

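/*
 * Lazily create the per-CPU event and enable it only while profiling is
 * active (ctr_running set).
 */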
static int nmi_timer_start_cpu(int cpu)
{
	struct perf_event *event = per_cpu(nmi_timer_events, cpu);

	if (!event) {
		event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,
							 nmi_timer_callback, NULL);
		if (IS_ERR(event))
			return PTR_ERR(event);
		per_cpu(nmi_timer_events, cpu) = event;
	}

	if (event && ctr_running)
		perf_event_enable(event);

	return 0;
}

static void nmi_timer_stop_cpu(int cpu)
{
	struct perf_event *event = per_cpu(nmi_timer_events, cpu);

	if (event && ctr_running)
		perf_event_disable(event);
}

static int nmi_timer_cpu_online(unsigned int cpu)
{
	nmi_timer_start_cpu(cpu);
	return 0;
}

static int nmi_timer_cpu_predown(unsigned int cpu)
{
	nmi_timer_stop_cpu(cpu);
	return 0;
}

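/*
 * Start/stop profiling on all online CPUs; ctr_running is updated with CPU
 * hotplug held off so the hotplug callbacks see a consistent state.
 */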
static int nmi_timer_start(void)
{
	int cpu;

	get_online_cpus();
	ctr_running = 1;
	for_each_online_cpu(cpu)
		nmi_timer_start_cpu(cpu);
	put_online_cpus();

	return 0;
}

static void nmi_timer_stop(void)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		nmi_timer_stop_cpu(cpu);
	ctr_running = 0;
	put_online_cpus();
}

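/* Dynamic hotplug state returned by cpuhp_setup_state(), used for teardown. */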
static enum cpuhp_state hp_online;

static void nmi_timer_shutdown(void)
{
	struct perf_event *event;
	int cpu;

	cpuhp_remove_state(hp_online);
	for_each_possible_cpu(cpu) {
		event = per_cpu(nmi_timer_events, cpu);
		if (!event)
			continue;
		perf_event_disable(event);
		per_cpu(nmi_timer_events, cpu) = NULL;
		perf_event_release_kernel(event);
	}
}

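/*
 * Program the sample period to one tick's worth of CPU cycles and register
 * the hotplug callbacks so per-CPU events follow CPUs coming and going.
 */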
static int nmi_timer_setup(void)
{
	int err;
	u64 period;

	/* clock cycles per tick: */
	period = (u64)cpu_khz * 1000;
	do_div(period, HZ);
	nmi_timer_attr.sample_period = period;

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "oprofile/nmi:online",
				nmi_timer_cpu_online, nmi_timer_cpu_predown);
	if (err < 0) {
		nmi_timer_shutdown();
		return err;
	}
	hp_online = err;
	return 0;
}

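/*
 * Verify once at init time that the perf-based NMI timer can be set up (and
 * tear it down again), then wire up the oprofile operations.
 */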
int __init op_nmi_timer_init(struct oprofile_operations *ops)
{
	int err = 0;

	err = nmi_timer_setup();
	if (err)
		return err;
	nmi_timer_shutdown();	/* only check, don't alloc */

	ops->create_files	= NULL;
	ops->setup		= nmi_timer_setup;
	ops->shutdown		= nmi_timer_shutdown;
	ops->start		= nmi_timer_start;
	ops->stop		= nmi_timer_stop;
	ops->cpu_type		= "timer";

	printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");

	return 0;
}

#endif