/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"

static struct op_x86_model_spec *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* must be protected with get_online_cpus()/put_online_cpus(): */
static int nmi_enabled;
static int ctr_running;

struct op_counter_config counter_config[OP_MAX_COUNTER];

/* common functions */

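/*
 * Build the EVENTSEL control value for one counter from its user
 * configuration.  A reference sketch of the resulting bit layout
 * (architectural perfmon encoding; the final shift merges in the
 * AMD-specific high event-select bits):
 *
 *   bits  0-7   event select (low byte)
 *   bits  8-15  unit mask
 *   bit   16    USR - count in user mode
 *   bit   17    OS  - count in kernel mode
 *   bit   20    INT - raise an interrupt (here: NMI) on overflow
 *   bits 32-35  event select (high nibble, AMD extension)
 *
 * Only the INV/EDGE/CMASK bits of ->extra are honoured.
 */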
u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
		    struct op_counter_config *counter_config)
{
	u64 val = 0;
	u16 event = (u16)counter_config->event;

	val |= ARCH_PERFMON_EVENTSEL_INT;
	val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
	val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
	val |= (counter_config->unit_mask & 0xFF) << 8;
	counter_config->extra &= (ARCH_PERFMON_EVENTSEL_INV |
				  ARCH_PERFMON_EVENTSEL_EDGE |
				  ARCH_PERFMON_EVENTSEL_CMASK);
	val |= counter_config->extra;
	event &= model->event_mask ? model->event_mask : 0xFF;
	val |= event & 0xFF;
	val |= (u64)(event & 0x0F00) << 24;

	return val;
}


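/*
 * Per-CPU NMI callback.  Return values follow the register_nmi_handler()
 * contract: NMI_DONE means "not ours, let other handlers see it",
 * NMI_HANDLED swallows the NMI.  While profiling is enabled but stopped,
 * a late counter overflow is answered by stopping the counters again
 * rather than passing the NMI on.
 */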
static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
{
	if (ctr_running)
		model->check_ctrs(regs, this_cpu_ptr(&cpu_msrs));
	else if (!nmi_enabled)
		return NMI_DONE;
	else
		model->stop(this_cpu_ptr(&cpu_msrs));
	return NMI_HANDLED;
}

static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			rdmsrl(counters[i].addr, counters[i].saved);
	}

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			rdmsrl(controls[i].addr, controls[i].saved);
	}
}

static void nmi_cpu_start(void *dummy)
{
	struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
	if (!msrs->controls)
		WARN_ON_ONCE(1);
	else
		model->start(msrs);
}

static int nmi_start(void)
{
	get_online_cpus();
	ctr_running = 1;
	/* make ctr_running visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_start, NULL, 1);
	put_online_cpus();
	return 0;
}

static void nmi_cpu_stop(void *dummy)
{
	struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
	if (!msrs->controls)
		WARN_ON_ONCE(1);
	else
		model->stop(msrs);
}

static void nmi_stop(void)
{
	get_online_cpus();
	on_each_cpu(nmi_cpu_stop, NULL, 1);
	ctr_running = 0;
	put_online_cpus();
}

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static DEFINE_PER_CPU(int, switch_index);

static inline int has_mux(void)
{
	return !!model->switch_ctrl;
}

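/*
 * Mapping between the physical counters actually programmed into the
 * MSRs and the larger set of virtual counters exposed to user space.
 * Worked example (hypothetical sizes): with num_counters = 4 physical
 * and num_virt_counters = 8 virtual counters, switch_index alternates
 * between 0 and 4; while it is 4, physical counter 1 carries virtual
 * counter 5, and op_x86_virt_to_phys(5) = 5 % 4 = 1.
 */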
inline int op_x86_phys_to_virt(int phys)
{
	return __this_cpu_read(switch_index) + phys;
}

inline int op_x86_virt_to_phys(int virt)
{
	return virt % model->num_counters;
}

static void nmi_shutdown_mux(void)
{
	int i;

	if (!has_mux())
		return;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).multiplex);
		per_cpu(cpu_msrs, i).multiplex = NULL;
		per_cpu(switch_index, i) = 0;
	}
}

static int nmi_setup_mux(void)
{
	size_t multiplex_size =
		sizeof(struct op_msr) * model->num_virt_counters;
	int i;

	if (!has_mux())
		return 1;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).multiplex =
			kzalloc(multiplex_size, GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).multiplex)
			return 0;
	}

	return 1;
}

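/*
 * Seed the per-CPU shadow values for all virtual counters.  Perfmon
 * counters count upwards and raise the overflow interrupt when they
 * wrap, so an enabled counter is preloaded with -count: after 'count'
 * events it overflows and we get our sample.
 */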
static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
{
	int i;
	struct op_msr *multiplex = msrs->multiplex;

	if (!has_mux())
		return;

	for (i = 0; i < model->num_virt_counters; ++i) {
		if (counter_config[i].enabled) {
			multiplex[i].saved = -(u64)counter_config[i].count;
		} else {
			multiplex[i].saved = 0;
		}
	}

	per_cpu(switch_index, cpu) = 0;
}

static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			rdmsrl(counters[i].addr, multiplex[virt].saved);
	}
}

static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (counters[i].addr)
			wrmsrl(counters[i].addr, multiplex[virt].saved);
	}
}

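/*
 * Rotate this CPU to the next set of virtual counters: stop the
 * hardware, park the current counts in the multiplex shadow array,
 * advance switch_index by one physical-counter-sized stride (wrapping
 * when we run past num_virt_counters or hit an unused set), reprogram
 * the controls for the new set, reload its saved counts and restart.
 */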
static void nmi_cpu_switch(void *dummy)
{
	int cpu = smp_processor_id();
	int si = per_cpu(switch_index, cpu);
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_stop(NULL);
	nmi_cpu_save_mpx_registers(msrs);

	/* move to next set */
	si += model->num_counters;
	if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
		per_cpu(switch_index, cpu) = 0;
	else
		per_cpu(switch_index, cpu) = si;

	model->switch_ctrl(model, msrs);
	nmi_cpu_restore_mpx_registers(msrs);

	nmi_cpu_start(NULL);
}


/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
	return counter_config[model->num_counters].count ? 0 : -EINVAL;
}

static int nmi_switch_event(void)
{
	if (!has_mux())
		return -ENOSYS;		/* not implemented */
	if (nmi_multiplex_on() < 0)
		return -EINVAL;		/* not necessary */

	get_online_cpus();
	if (ctr_running)
		on_each_cpu(nmi_cpu_switch, NULL, 1);
	put_online_cpus();

	return 0;
}

static inline void mux_init(struct oprofile_operations *ops)
{
	if (has_mux())
		ops->switch_events = nmi_switch_event;
}

static void mux_clone(int cpu)
{
	if (!has_mux())
		return;

	memcpy(per_cpu(cpu_msrs, cpu).multiplex,
	       per_cpu(cpu_msrs, 0).multiplex,
	       sizeof(struct op_msr) * model->num_virt_counters);
}

#else

inline int op_x86_phys_to_virt(int phys) { return phys; }
inline int op_x86_virt_to_phys(int virt) { return virt; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }
static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
static inline void mux_init(struct oprofile_operations *ops) { }
static void mux_clone(int cpu) { }

#endif /* CONFIG_OPROFILE_EVENT_MULTIPLEX */

static void free_msrs(void)
{
	int i;
	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).counters);
		per_cpu(cpu_msrs, i).counters = NULL;
		kfree(per_cpu(cpu_msrs, i).controls);
		per_cpu(cpu_msrs, i).controls = NULL;
	}
	nmi_shutdown_mux();
}

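/*
 * Note the success convention here: allocate_msrs() and nmi_setup_mux()
 * return 1 on success and 0 on failure, unlike the 0/-errno convention
 * used elsewhere in this file; nmi_setup() translates 0 into -ENOMEM.
 */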
static int allocate_msrs(void)
{
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;

	int i;
	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters)
			goto fail;
		per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls)
			goto fail;
	}

	if (!nmi_setup_mux())
		goto fail;

	return 1;

fail:
	free_msrs();
	return 0;
}

static void nmi_cpu_setup(void)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_save_registers(msrs);
	raw_spin_lock(&oprofilefs_lock);
	model->setup_ctrs(model, msrs);
	nmi_cpu_setup_mux(cpu, msrs);
	raw_spin_unlock(&oprofilefs_lock);
	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			wrmsrl(controls[i].addr, controls[i].saved);
	}

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			wrmsrl(counters[i].addr, counters[i].saved);
	}
}

static void nmi_cpu_shutdown(void)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	/*
	 * Restoring APIC_LVTPC can trigger an APIC error because the
	 * delivery mode and vector number combination can be illegal.
	 * That's by design: on power-on the APIC LVT entries contain a
	 * zero vector number, which is legal only for NMI delivery
	 * mode.  So inhibit APIC errors before restoring the LVTPC.
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
	apic_write(APIC_LVTERR, v);
	nmi_cpu_restore_registers(msrs);
}

static int nmi_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	if (nmi_enabled)
		nmi_cpu_setup();
	if (ctr_running)
		nmi_cpu_start(NULL);
	local_irq_enable();
	return 0;
}

static int nmi_cpu_down_prep(unsigned int cpu)
{
	local_irq_disable();
	if (ctr_running)
		nmi_cpu_stop(NULL);
	if (nmi_enabled)
		nmi_cpu_shutdown();
	local_irq_enable();
	return 0;
}

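/*
 * Populate the oprofilefs counter directories.  A sketch of the
 * resulting layout (assuming oprofilefs is mounted at /dev/oprofile,
 * its usual mount point): /dev/oprofile/0/enabled, .../0/event,
 * .../0/count and so on, one numbered directory per virtual counter.
 */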
static int nmi_create_files(struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < model->num_virt_counters; ++i) {
		struct dentry *dir;
		char buf[4];

		/*
		 * Quick little hack to _not_ expose a counter if it is
		 * not available for use.  This protects the userspace
		 * application.
		 * NOTE: this assumes a 1:1 mapping (counters are
		 * organized sequentially in their struct assignment).
		 */
		if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
			continue;

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(root, buf);
		oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
		oprofilefs_create_ulong(dir, "extra", &counter_config[i].extra);
	}

	return 0;
}

static enum cpuhp_state cpuhp_nmi_online;

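/*
 * Bring profiling up in order: allocate the per-CPU MSR arrays, read
 * the MSR addresses once on the boot CPU and copy them to all others,
 * register the NMI callback, publish nmi_enabled, then install the CPU
 * hotplug callbacks, which also perform the initial per-CPU setup via
 * nmi_cpu_online() on each online CPU.
 */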
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/*
	 * We need to serialize save and setup for HT because the subsets
	 * of MSRs are distinct for the save and setup operations.
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!IS_ENABLED(CONFIG_SMP) || !cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
				   0, "oprofile");
	if (err)
		goto fail;

	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/oprofile:online",
				nmi_cpu_online, nmi_cpu_down_prep);
	if (err < 0)
		goto fail_nmi;
	cpuhp_nmi_online = err;
	return 0;
fail_nmi:
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
fail:
	free_msrs();
	return err;
}

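/*
 * Teardown mirrors setup in reverse: removing the hotplug state first
 * runs nmi_cpu_down_prep() on every online CPU, so the counters are
 * already stopped and the LVTPC restored before the flags are cleared
 * and the NMI handler is unregistered.
 */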
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	cpuhp_remove_state(cpuhp_nmi_online);
	nmi_enabled = 0;
	ctr_running = 0;

	/* make variables visible to the nmi handler: */
	smp_mb();
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}

#ifdef CONFIG_PM

static int nmi_suspend(void)
{
	/* Only one CPU left, just stop that one */
	if (nmi_enabled == 1)
		nmi_cpu_stop(NULL);
	return 0;
}

static void nmi_resume(void)
{
	if (nmi_enabled == 1)
		nmi_cpu_start(NULL);
}

static struct syscore_ops oprofile_syscore_ops = {
	.resume = nmi_resume,
	.suspend = nmi_suspend,
};

static void __init init_suspend_resume(void)
{
	register_syscore_ops(&oprofile_syscore_ops);
}

static void exit_suspend_resume(void)
{
	unregister_syscore_ops(&oprofile_syscore_ops);
}

#else

static inline void init_suspend_resume(void) { }
static inline void exit_suspend_resume(void) { }

#endif /* CONFIG_PM */

static int __init p4_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (cpu_model > 6 || cpu_model == 5)
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}

enum __force_cpu_type {
	reserved = 0,		/* do not force */
	timer,
	arch_perfmon,
};

static int force_cpu_type;

static int set_cpu_type(const char *str, const struct kernel_param *kp)
{
	if (!strcmp(str, "timer")) {
		force_cpu_type = timer;
		printk(KERN_INFO "oprofile: forcing NMI timer mode\n");
	} else if (!strcmp(str, "arch_perfmon")) {
		force_cpu_type = arch_perfmon;
		printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
	} else {
		force_cpu_type = 0;
	}

	return 0;
}
module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
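/*
 * Usage note: with oprofile built as a module this is "cpu_type=..."
 * at modprobe time; built in, it becomes the "oprofile.cpu_type="
 * kernel command-line parameter.  Anything other than "timer" or
 * "arch_perfmon" silently resets the override.
 */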

static int __init ppro_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;
	struct op_x86_model_spec *spec = &op_ppro_spec;	/* default */

	if (force_cpu_type == arch_perfmon && boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
		return 0;

	/*
	 * Documentation on identifying Intel processors by CPU family
	 * and model can be found in the Intel Software Developer's
	 * Manuals (SDM):
	 *
	 *  http://www.intel.com/products/processor/manuals/
	 *
	 * As of May 2010 the documentation for this was in the:
	 * "Intel 64 and IA-32 Architectures Software Developer's
	 * Manual Volume 3B: System Programming Guide", "Table B-1
	 * CPUID Signature Values of DisplayFamily_DisplayModel".
	 */
	switch (cpu_model) {
	case 0 ... 2:
		*cpu_type = "i386/ppro";
		break;
	case 3 ... 5:
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
	case 10 ... 11:
		*cpu_type = "i386/piii";
		break;
	case 9:
	case 13:
		*cpu_type = "i386/p6_mobile";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;
	case 0x0f:
	case 0x16:
	case 0x17:
	case 0x1d:
		*cpu_type = "i386/core_2";
		break;
	case 0x1a:
	case 0x1e:
	case 0x2e:
		spec = &op_arch_perfmon_spec;
		*cpu_type = "i386/core_i7";
		break;
	case 0x1c:
		*cpu_type = "i386/atom";
		break;
	default:
		/* Unknown */
		return 0;
	}

	model = spec;
	return 1;
}

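/*
 * Entry point: pick a counter model for the boot CPU and wire the
 * generic oprofile operations to the NMI implementation.  AMD CPUs
 * dispatch on family, Intel on family 0xf (P4) or 6 (P6 class), with
 * architectural perfmon as the Intel fallback; everything else returns
 * -ENODEV so the oprofile core can fall back to timer mode.
 */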
int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type = NULL;
	int ret = 0;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return -ENODEV;

	if (force_cpu_type == timer)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */

		switch (family) {
		case 6:
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			/*
			 * Actually it could be i386/hammer too, but
			 * give user space a consistent name.
			 */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			cpu_type = "x86-64/family11h";
			break;
		case 0x12:
			cpu_type = "x86-64/family12h";
			break;
		case 0x14:
			cpu_type = "x86-64/family14h";
			break;
		case 0x15:
			cpu_type = "x86-64/family15h";
			break;
		default:
			return -ENODEV;
		}
		model = &op_amd_spec;
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
		/* Pentium IV */
		case 0xf:
			p4_init(&cpu_type);
			break;

		/* A P6-class processor */
		case 6:
			ppro_init(&cpu_type);
			break;

		default:
			break;
		}

		if (cpu_type)
			break;

		if (!boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
			return -ENODEV;

		/* use arch perfmon as fallback */
		cpu_type = "i386/arch_perfmon";
		model = &op_arch_perfmon_spec;
		break;

	default:
		return -ENODEV;
	}

	/* default values, can be overwritten by model */
	ops->create_files = nmi_create_files;
	ops->setup = nmi_setup;
	ops->shutdown = nmi_shutdown;
	ops->start = nmi_start;
	ops->stop = nmi_stop;
	ops->cpu_type = cpu_type;

	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	if (!model->num_virt_counters)
		model->num_virt_counters = model->num_counters;

	mux_init(ops);

	init_suspend_resume();

	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}

void op_nmi_exit(void)
{
	exit_suspend_resume();
}