Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support Intel/AMD RAPL energy consumption counters
 * Copyright (C) 2013 Google, Inc., Stephane Eranian
 *
 * Intel RAPL interface is specified in the IA-32 Manual Vol3b
 * section 14.7.1 (September 2013)
 *
 * AMD RAPL interface for Fam17h is described in the public PPR:
 * https://bugzilla.kernel.org/show_bug.cgi?id=206537
 *
 * RAPL provides more controls than just reporting energy consumption
 * however here we only expose the 3 energy consumption free running
 * counters (pp0, pkg, dram).
 *
 * Each of those counters increments in a power unit defined by the
 * RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules
 * but it can vary.
 *
 * Counter to rapl events mappings:
 *
 *  pp0 counter: consumption of all physical cores (power plane 0)
 * 	  event: rapl_energy_cores
 *    perf code: 0x1
 *
 *  pkg counter: consumption of the whole processor package
 *	  event: rapl_energy_pkg
 *    perf code: 0x2
 *
 * dram counter: consumption of the dram domain (servers only)
 *	  event: rapl_energy_dram
 *    perf code: 0x3
 *
 * gpu counter: consumption of the builtin-gpu domain (client only)
 *	  event: rapl_energy_gpu
 *    perf code: 0x4
 *
 *  psys counter: consumption of the builtin-psys domain (client only)
 *	  event: rapl_energy_psys
 *    perf code: 0x5
 *
 * We manage those counters as free running (read-only). They may be
 * used simultaneously by other tools, such as turbostat.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it does not make sense and is not
 * supported by the RAPL hardware.
 *
 * Because we want to avoid floating-point operations in the kernel,
 * the events are all reported in fixed point arithmetic (32.32).
 * Tools must adjust the counts to convert them to Watts using
 * the duration of the measurement. Tools may use a function such as
 * ldexp(raw_count, -32);
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) #define pr_fmt(fmt) "RAPL PMU: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) #include <linux/perf_event.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) #include <linux/nospec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) #include <asm/cpu_device_id.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) #include <asm/intel-family.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) #include "perf_event.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) #include "probe.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70)  * RAPL energy status counters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71)  */
/*
 * RAPL domain indices. The perf event code is index + 1 (see
 * rapl_pmu_event_init()), and the order must stay in sync with
 * rapl_domain_names[] below.
 */
enum perf_rapl_events {
	PERF_RAPL_PP0 = 0,		/* all cores */
	PERF_RAPL_PKG,			/* entire package */
	PERF_RAPL_RAM,			/* DRAM */
	PERF_RAPL_PP1,			/* gpu */
	PERF_RAPL_PSYS,			/* psys */

	PERF_RAPL_MAX,
	NR_RAPL_DOMAINS = PERF_RAPL_MAX,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
/* Human-readable domain names, indexed by enum perf_rapl_events */
static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
	"pp0-core",
	"package",
	"dram",
	"pp1-gpu",
	"psys",
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92)  * event code: LSB 8 bits, passed in attr->config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93)  * any other bit is reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) #define RAPL_EVENT_MASK	0xFFULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) #define RAPL_CNTR_WIDTH 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 
/*
 * Define a sysfs event attribute named event_attr_<v> whose show
 * routine (perf_event_sysfs_show) emits the literal string 'str'.
 */
#define RAPL_EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {				\
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL),	\
	.id		= 0,							\
	.event_str	= str,							\
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 
/*
 * Per-die PMU state. 'lock' protects 'n_active' and 'active_list'
 * (see __rapl_pmu_event_start()/rapl_pmu_event_stop()).
 */
struct rapl_pmu {
	raw_spinlock_t		lock;		/* protects n_active and active_list */
	int			n_active;	/* number of started events */
	int			cpu;		/* CPU designated to read this die's MSRs */
	struct list_head	active_list;	/* started events, updated by the hrtimer */
	struct pmu		*pmu;		/* back-pointer to the perf PMU */
	ktime_t			timer_interval;	/* hrtimer period */
	struct hrtimer		hrtimer;	/* periodic counter-refresh timer */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 
/* All per-die PMU instances, indexed by logical die id */
struct rapl_pmus {
	struct pmu		pmu;	/* the PMU registered with the perf core */
	unsigned int		maxdie;	/* number of entries in pmus[] */
	struct rapl_pmu		*pmus[];	/* per-die state, see cpu_to_rapl_pmu() */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 
/*
 * Model-specific deviations from the units advertised by the power
 * unit MSR (applied where the units are read — not in this chunk).
 */
enum rapl_unit_quirk {
	RAPL_UNIT_QUIRK_NONE,
	RAPL_UNIT_QUIRK_INTEL_HSW,	/* Haswell-specific unit quirk */
	RAPL_UNIT_QUIRK_INTEL_SPR,	/* Sapphire Rapids-specific unit quirk */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 
/* Per-CPU-model description of the supported RAPL events and MSRs */
struct rapl_model {
	struct perf_msr *rapl_msrs;	/* energy status MSR table for this model */
	unsigned long	events;		/* bitmask of supported PERF_RAPL_* domains */
	unsigned int	msr_power_unit;	/* MSR holding the energy unit exponents */
	enum rapl_unit_quirk	unit_quirk;	/* unit deviation, if any */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 
 /* 1/2^hw_unit Joule */
static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;	/* per-domain unit exponent */
static struct rapl_pmus *rapl_pmus;	/* all per-die PMU state */
static cpumask_t rapl_cpu_mask;		/* CPUs servicing RAPL events (one per die) */
static unsigned int rapl_cntr_mask;	/* bitmask of domains present on this system */
static u64 rapl_timer_ms;		/* hrtimer refresh period */
static struct perf_msr *rapl_msrs;	/* active model's MSR table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	unsigned int dieid = topology_logical_die_id(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	 * The unsigned check also catches the '-1' return value for non
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	 * existent mappings in the topology map.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	return dieid < rapl_pmus->maxdie ? rapl_pmus->pmus[dieid] : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 
/* Read the event's free-running energy MSR (read-only hardware counter). */
static inline u64 rapl_read_counter(struct perf_event *event)
{
	u64 raw;
	rdmsrl(event->hw.event_base, raw);
	return raw;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) static inline u64 rapl_scale(u64 v, int cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	if (cfg > NR_RAPL_DOMAINS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 		pr_warn("Invalid domain %d, failed to scale data\n", cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 		return v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	 * scale delta to smallest unit (1/2^32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	 * users must then scale back: count * 1/(1e9*2^32) to get Joules
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	 * or use ldexp(count, -32).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	 * Watts = Joules/Time delta
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	return v << (32 - rapl_hw_unit[cfg - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 
/*
 * Fold the delta since the last read of the event's energy MSR into
 * event->count and return the new raw MSR value.
 *
 * Lockless: prev_count is advanced with cmpxchg, so concurrent
 * updaters (hrtimer callback vs. read/stop paths) never account the
 * same delta twice.
 */
static u64 rapl_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;
	s64 delta, sdelta;
	int shift = RAPL_CNTR_WIDTH;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	rdmsrl(event->hw.event_base, new_raw_count);

	/* somebody else updated prev_count under us: retry */
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count) {
		cpu_relax();
		goto again;
	}

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	/* shift up then arithmetic-shift down to sign-extend the 32-bit delta */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	/* convert to the common 1/2^32 Joule unit */
	sdelta = rapl_scale(delta, event->hw.config);

	local64_add(sdelta, &event->count);

	return new_raw_count;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) static void rapl_start_hrtimer(struct rapl_pmu *pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)        hrtimer_start(&pmu->hrtimer, pmu->timer_interval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 		     HRTIMER_MODE_REL_PINNED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 
/*
 * Periodic timer callback: refresh every active event on this PMU so
 * the 32-bit free-running counters are read before they can wrap.
 * Re-arms itself for as long as events remain active.
 */
static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
{
	struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
	struct perf_event *event;
	unsigned long flags;

	/* no active events left: let the timer die */
	if (!pmu->n_active)
		return HRTIMER_NORESTART;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	list_for_each_entry(event, &pmu->active_list, active_entry)
		rapl_event_update(event);

	raw_spin_unlock_irqrestore(&pmu->lock, flags);

	/* schedule the next refresh relative to now */
	hrtimer_forward_now(hrtimer, pmu->timer_interval);

	return HRTIMER_RESTART;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) static void rapl_hrtimer_init(struct rapl_pmu *pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	struct hrtimer *hr = &pmu->hrtimer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	hr->function = rapl_hrtimer_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 
/*
 * Start @event: snapshot the current MSR value as the baseline, put the
 * event on the active list, and kick the refresh timer when this is the
 * first active event. Caller must hold pmu->lock.
 */
static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
				   struct perf_event *event)
{
	/* starting an already-started event is a caller bug */
	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;

	list_add_tail(&event->active_entry, &pmu->active_list);

	/* baseline for subsequent delta computation */
	local64_set(&event->hw.prev_count, rapl_read_counter(event));

	pmu->n_active++;
	if (pmu->n_active == 1)
		rapl_start_hrtimer(pmu);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 
/* pmu::start callback: start the event under pmu->lock. */
static void rapl_pmu_event_start(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);
	__rapl_pmu_event_start(pmu, event);
	raw_spin_unlock_irqrestore(&pmu->lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 
/*
 * pmu::stop callback. Deactivates the event (removing it from the
 * active list and cancelling the refresh timer when it was the last
 * active one) and, when PERF_EF_UPDATE is set, drains the remaining
 * counter delta into event->count.
 */
static void rapl_pmu_event_stop(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	/* mark event as deactivated and stopped */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		WARN_ON_ONCE(pmu->n_active <= 0);
		pmu->n_active--;
		if (pmu->n_active == 0)
			hrtimer_cancel(&pmu->hrtimer);

		list_del(&event->active_entry);

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	/* check if update of sw counter is necessary */
	if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of a event
		 * that we are disabling:
		 */
		rapl_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}

	raw_spin_unlock_irqrestore(&pmu->lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 
/*
 * pmu::add callback: mark the event stopped and up to date, then start
 * it immediately when PERF_EF_START is set. Always succeeds.
 */
static int rapl_pmu_event_add(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (mode & PERF_EF_START)
		__rapl_pmu_event_start(pmu, event);

	raw_spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 
/* pmu::del callback: stop the event with a final count update. */
static void rapl_pmu_event_del(struct perf_event *event, int flags)
{
	rapl_pmu_event_stop(event, PERF_EF_UPDATE);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 
/*
 * pmu::event_init callback: validate attr.config and bind the event to
 * its die's rapl_pmu and energy status MSR.
 *
 * Returns 0 on success, -ENOENT for events belonging to another PMU,
 * -EINVAL for unsupported configs, CPUs, or sampling requests.
 */
static int rapl_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config & RAPL_EVENT_MASK;
	int bit, ret = 0;
	struct rapl_pmu *pmu;

	/* only look at RAPL events */
	if (event->attr.type != rapl_pmus->pmu.type)
		return -ENOENT;

	/* check only supported bits are set */
	if (event->attr.config & ~RAPL_EVENT_MASK)
		return -EINVAL;

	/* system-wide counting only: a concrete CPU is required */
	if (event->cpu < 0)
		return -EINVAL;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	/* valid event codes are 1..NR_RAPL_DOMAINS */
	if (!cfg || cfg >= NR_RAPL_DOMAINS + 1)
		return -EINVAL;

	/* clamp cfg against speculative out-of-bounds use (Spectre v1) */
	cfg = array_index_nospec((long)cfg, NR_RAPL_DOMAINS + 1);
	bit = cfg - 1;

	/* check event supported */
	if (!(rapl_cntr_mask & (1 << bit)))
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/* must be done before validate_group */
	pmu = cpu_to_rapl_pmu(event->cpu);
	if (!pmu)
		return -EINVAL;
	/* redirect the event to the CPU designated to read this die's MSRs */
	event->cpu = pmu->cpu;
	event->pmu_private = pmu;
	event->hw.event_base = rapl_msrs[bit].msr;
	event->hw.config = cfg;
	event->hw.idx = bit;

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 
/* pmu::read callback: fold the latest MSR delta into event->count. */
static void rapl_pmu_event_read(struct perf_event *event)
{
	rapl_event_update(event);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 
/* sysfs "cpumask" show routine: the CPUs servicing RAPL events. */
static ssize_t rapl_get_attr_cpumask(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 
/* read-only "cpumask" attribute in the PMU's sysfs directory */
static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);

static struct attribute *rapl_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

/* top-level (unnamed) attribute group holding the cpumask */
static struct attribute_group rapl_pmu_attr_group = {
	.attrs = rapl_pmu_attrs,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 
/* sysfs event name strings; the event code matches the perf codes above */
RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
RAPL_EVENT_ATTR_STR(energy-pkg  ,   rapl_pkg, "event=0x02");
RAPL_EVENT_ATTR_STR(energy-ram  ,   rapl_ram, "event=0x03");
RAPL_EVENT_ATTR_STR(energy-gpu  ,   rapl_gpu, "event=0x04");
RAPL_EVENT_ATTR_STR(energy-psys,   rapl_psys, "event=0x05");

/* all counts are reported in Joules after applying the scale below */
RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-pkg.unit  ,   rapl_pkg_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-ram.unit  ,   rapl_ram_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-gpu.unit  ,   rapl_gpu_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-psys.unit,   rapl_psys_unit, "Joules");

/*
 * we compute in 0.23 nJ increments regardless of MSR
 * (2^-32 J, matching the fixed-point unit produced by rapl_scale())
 */
RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-pkg.scale,     rapl_pkg_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-ram.scale,     rapl_ram_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-gpu.scale,     rapl_gpu_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-psys.scale,   rapl_psys_scale, "2.3283064365386962890625e-10");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)  * There are no default events, but we need to create
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)  * "events" group (with empty attrs) before updating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)  * it with detected events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)  */
static struct attribute *attrs_empty[] = {
	NULL,
};

/* placeholder "events" group; populated with detected events at init */
static struct attribute_group rapl_pmu_events_group = {
	.name = "events",
	.attrs = attrs_empty,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 
/* config:0-7 carries the RAPL event code (see RAPL_EVENT_MASK) */
PMU_FORMAT_ATTR(event, "config:0-7");
static struct attribute *rapl_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

/* sysfs "format" group describing how attr.config is laid out */
static struct attribute_group rapl_pmu_format_group = {
	.name = "format",
	.attrs = rapl_formats_attr,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 
/* all sysfs attribute groups exposed by the RAPL PMU */
static const struct attribute_group *rapl_attr_groups[] = {
	&rapl_pmu_attr_group,
	&rapl_pmu_format_group,
	&rapl_pmu_events_group,
	NULL,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) static struct attribute *rapl_events_cores[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 	EVENT_PTR(rapl_cores),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 	EVENT_PTR(rapl_cores_unit),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 	EVENT_PTR(rapl_cores_scale),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) static umode_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) rapl_not_visible(struct kobject *kobj, struct attribute *attr, int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) static struct attribute_group rapl_events_cores_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 	.name  = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 	.attrs = rapl_events_cores,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 	.is_visible = rapl_not_visible,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) static struct attribute *rapl_events_pkg[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 	EVENT_PTR(rapl_pkg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 	EVENT_PTR(rapl_pkg_unit),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 	EVENT_PTR(rapl_pkg_scale),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) static struct attribute_group rapl_events_pkg_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 	.name  = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 	.attrs = rapl_events_pkg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 	.is_visible = rapl_not_visible,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) static struct attribute *rapl_events_ram[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 	EVENT_PTR(rapl_ram),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 	EVENT_PTR(rapl_ram_unit),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	EVENT_PTR(rapl_ram_scale),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) static struct attribute_group rapl_events_ram_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	.name  = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 	.attrs = rapl_events_ram,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	.is_visible = rapl_not_visible,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) static struct attribute *rapl_events_gpu[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 	EVENT_PTR(rapl_gpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 	EVENT_PTR(rapl_gpu_unit),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 	EVENT_PTR(rapl_gpu_scale),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) static struct attribute_group rapl_events_gpu_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	.name  = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 	.attrs = rapl_events_gpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 	.is_visible = rapl_not_visible,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) static struct attribute *rapl_events_psys[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 	EVENT_PTR(rapl_psys),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 	EVENT_PTR(rapl_psys_unit),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	EVENT_PTR(rapl_psys_scale),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) static struct attribute_group rapl_events_psys_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 	.name  = "events",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 	.attrs = rapl_events_psys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 	.is_visible = rapl_not_visible,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) static bool test_msr(int idx, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 	return test_bit(idx, (unsigned long *) data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) static struct perf_msr intel_rapl_msrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	[PERF_RAPL_PP0]  = { MSR_PP0_ENERGY_STATUS,      &rapl_events_cores_group, test_msr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 	[PERF_RAPL_PKG]  = { MSR_PKG_ENERGY_STATUS,      &rapl_events_pkg_group,   test_msr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	[PERF_RAPL_RAM]  = { MSR_DRAM_ENERGY_STATUS,     &rapl_events_ram_group,   test_msr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 	[PERF_RAPL_PP1]  = { MSR_PP1_ENERGY_STATUS,      &rapl_events_gpu_group,   test_msr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 	[PERF_RAPL_PSYS] = { MSR_PLATFORM_ENERGY_STATUS, &rapl_events_psys_group,  test_msr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)  * Force to PERF_RAPL_MAX size due to:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)  * - perf_msr_probe(PERF_RAPL_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)  * - want to use same event codes across both architectures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) static struct perf_msr amd_rapl_msrs[PERF_RAPL_MAX] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 	[PERF_RAPL_PKG]  = { MSR_AMD_PKG_ENERGY_STATUS,  &rapl_events_pkg_group,   test_msr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) static int rapl_cpu_offline(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 	int target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 	/* Check if exiting cpu is used for collecting rapl events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 	if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	pmu->cpu = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	/* Find a new cpu to collect rapl events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	/* Migrate rapl events to the new target */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 	if (target < nr_cpu_ids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 		cpumask_set_cpu(target, &rapl_cpu_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 		pmu->cpu = target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 		perf_pmu_migrate_context(pmu->pmu, cpu, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) static int rapl_cpu_online(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 	int target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 	if (!pmu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 		if (!pmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 		raw_spin_lock_init(&pmu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 		INIT_LIST_HEAD(&pmu->active_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 		pmu->pmu = &rapl_pmus->pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 		rapl_hrtimer_init(pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 		rapl_pmus->pmus[topology_logical_die_id(cpu)] = pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	 * Check if there is an online cpu in the package which collects rapl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 	 * events already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 	target = cpumask_any_and(&rapl_cpu_mask, topology_die_cpumask(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 	if (target < nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 	cpumask_set_cpu(cpu, &rapl_cpu_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 	pmu->cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) static int rapl_check_hw_unit(struct rapl_model *rm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 	u64 msr_rapl_power_unit_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 	/* protect rdmsrl() to handle virtualization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 	if (rdmsrl_safe(rm->msr_power_unit, &msr_rapl_power_unit_bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 	for (i = 0; i < NR_RAPL_DOMAINS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 		rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	switch (rm->unit_quirk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 	 * DRAM domain on HSW server and KNL has fixed energy unit which can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	 * different than the unit from power unit MSR. See
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 	 * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	 * of 2. Datasheet, September 2014, Reference Number: 330784-001 "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	case RAPL_UNIT_QUIRK_INTEL_HSW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 		rapl_hw_unit[PERF_RAPL_RAM] = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 	 * SPR shares the same DRAM domain energy unit as HSW, plus it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 	 * also has a fixed energy unit for Psys domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 	case RAPL_UNIT_QUIRK_INTEL_SPR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 		rapl_hw_unit[PERF_RAPL_RAM] = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 		rapl_hw_unit[PERF_RAPL_PSYS] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 	 * Calculate the timer rate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	 * Use reference of 200W for scaling the timeout to avoid counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 	 * overflows. 200W = 200 Joules/sec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 	 * Divide interval by 2 to avoid lockstep (2 * 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 	 * if hw unit is 32, then we use 2 ms 1/200/2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	rapl_timer_ms = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 	if (rapl_hw_unit[0] < 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 		rapl_timer_ms = (1000 / (2 * 100));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 		rapl_timer_ms *= (1ULL << (32 - rapl_hw_unit[0] - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) static void __init rapl_advertise(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 	pr_info("API unit is 2^-32 Joules, %d fixed counters, %llu ms ovfl timer\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 		hweight32(rapl_cntr_mask), rapl_timer_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 	for (i = 0; i < NR_RAPL_DOMAINS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 		if (rapl_cntr_mask & (1 << i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 			pr_info("hw unit of domain %s 2^-%d Joules\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 				rapl_domain_names[i], rapl_hw_unit[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) static void cleanup_rapl_pmus(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 	for (i = 0; i < rapl_pmus->maxdie; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 		kfree(rapl_pmus->pmus[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 	kfree(rapl_pmus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) static const struct attribute_group *rapl_attr_update[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 	&rapl_events_cores_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 	&rapl_events_pkg_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 	&rapl_events_ram_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 	&rapl_events_gpu_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 	&rapl_events_psys_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) static int __init init_rapl_pmus(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 	int maxdie = topology_max_packages() * topology_max_die_per_package();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 	size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	size = sizeof(*rapl_pmus) + maxdie * sizeof(struct rapl_pmu *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 	rapl_pmus = kzalloc(size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 	if (!rapl_pmus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 	rapl_pmus->maxdie		= maxdie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 	rapl_pmus->pmu.attr_groups	= rapl_attr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 	rapl_pmus->pmu.attr_update	= rapl_attr_update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 	rapl_pmus->pmu.task_ctx_nr	= perf_invalid_context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 	rapl_pmus->pmu.event_init	= rapl_pmu_event_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 	rapl_pmus->pmu.add		= rapl_pmu_event_add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	rapl_pmus->pmu.del		= rapl_pmu_event_del;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 	rapl_pmus->pmu.start		= rapl_pmu_event_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 	rapl_pmus->pmu.stop		= rapl_pmu_event_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 	rapl_pmus->pmu.read		= rapl_pmu_event_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 	rapl_pmus->pmu.module		= THIS_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 	rapl_pmus->pmu.capabilities	= PERF_PMU_CAP_NO_EXCLUDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) static struct rapl_model model_snb = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 	.events		= BIT(PERF_RAPL_PP0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 			  BIT(PERF_RAPL_PKG) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 			  BIT(PERF_RAPL_PP1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 	.msr_power_unit = MSR_RAPL_POWER_UNIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 	.rapl_msrs      = intel_rapl_msrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) static struct rapl_model model_snbep = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 	.events		= BIT(PERF_RAPL_PP0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 			  BIT(PERF_RAPL_PKG) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 			  BIT(PERF_RAPL_RAM),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 	.msr_power_unit = MSR_RAPL_POWER_UNIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 	.rapl_msrs      = intel_rapl_msrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) static struct rapl_model model_hsw = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 	.events		= BIT(PERF_RAPL_PP0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 			  BIT(PERF_RAPL_PKG) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 			  BIT(PERF_RAPL_RAM) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 			  BIT(PERF_RAPL_PP1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 	.msr_power_unit = MSR_RAPL_POWER_UNIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 	.rapl_msrs      = intel_rapl_msrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) static struct rapl_model model_hsx = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 	.events		= BIT(PERF_RAPL_PP0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 			  BIT(PERF_RAPL_PKG) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 			  BIT(PERF_RAPL_RAM),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 	.unit_quirk	= RAPL_UNIT_QUIRK_INTEL_HSW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 	.msr_power_unit = MSR_RAPL_POWER_UNIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 	.rapl_msrs      = intel_rapl_msrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) static struct rapl_model model_knl = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 	.events		= BIT(PERF_RAPL_PKG) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 			  BIT(PERF_RAPL_RAM),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 	.unit_quirk	= RAPL_UNIT_QUIRK_INTEL_HSW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 	.msr_power_unit = MSR_RAPL_POWER_UNIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 	.rapl_msrs      = intel_rapl_msrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) static struct rapl_model model_skl = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 	.events		= BIT(PERF_RAPL_PP0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 			  BIT(PERF_RAPL_PKG) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 			  BIT(PERF_RAPL_RAM) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 			  BIT(PERF_RAPL_PP1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 			  BIT(PERF_RAPL_PSYS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 	.msr_power_unit = MSR_RAPL_POWER_UNIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 	.rapl_msrs      = intel_rapl_msrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) static struct rapl_model model_spr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 	.events		= BIT(PERF_RAPL_PP0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 			  BIT(PERF_RAPL_PKG) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 			  BIT(PERF_RAPL_RAM) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 			  BIT(PERF_RAPL_PSYS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 	.unit_quirk	= RAPL_UNIT_QUIRK_INTEL_SPR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 	.msr_power_unit = MSR_RAPL_POWER_UNIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 	.rapl_msrs      = intel_rapl_msrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) static struct rapl_model model_amd_fam17h = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 	.events		= BIT(PERF_RAPL_PKG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 	.msr_power_unit = MSR_AMD_RAPL_POWER_UNIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 	.rapl_msrs      = amd_rapl_msrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) static const struct x86_cpu_id rapl_model_match[] __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE,		&model_snb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X,	&model_snbep),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE,		&model_snb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X,		&model_snbep),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 	X86_MATCH_INTEL_FAM6_MODEL(HASWELL,		&model_hsw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X,		&model_hsx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L,		&model_hsw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G,		&model_hsw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL,		&model_hsw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G,		&model_hsw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,		&model_hsx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D,		&model_hsx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,	&model_knl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,	&model_knl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,		&model_skl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,		&model_skl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,		&model_hsx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,		&model_skl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,		&model_skl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 	X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L,	&model_skl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&model_hsw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D,	&model_hsw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS,	&model_hsw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		&model_skl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE,		&model_skl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,		&model_hsx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		&model_hsx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,		&model_skl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,		&model_skl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&model_spr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 	X86_MATCH_VENDOR_FAM(AMD,	0x17,		&model_amd_fam17h),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 	X86_MATCH_VENDOR_FAM(HYGON,	0x18,		&model_amd_fam17h),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 	X86_MATCH_VENDOR_FAM(AMD,	0x19,		&model_amd_fam17h),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) static int __init rapl_pmu_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 	const struct x86_cpu_id *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 	struct rapl_model *rm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 	id = x86_match_cpu(rapl_model_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 	if (!id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 	rm = (struct rapl_model *) id->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 	rapl_msrs = rm->rapl_msrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 	rapl_cntr_mask = perf_msr_probe(rapl_msrs, PERF_RAPL_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 					false, (void *) &rm->events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 	ret = rapl_check_hw_unit(rm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 	ret = init_rapl_pmus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 	 * Install callbacks. Core will call them for each online cpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 				"perf/x86/rapl:online",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 				rapl_cpu_online, rapl_cpu_offline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 		goto out1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 	rapl_advertise();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) out1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 	pr_warn("Initialization failed (%d), disabled\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 	cleanup_rapl_pmus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) module_init(rapl_pmu_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) static void __exit intel_rapl_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 	perf_pmu_unregister(&rapl_pmus->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 	cleanup_rapl_pmus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) module_exit(intel_rapl_exit);