// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/interrupt.h>

#include <asm/xen/hypercall.h>
#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/xenpmu.h>

#include "xen-ops.h"
#include "pmu.h"

/* x86_pmu.handle_irq definition */
#include "../events/perf_event.h"

#define XENPMU_IRQ_PROCESSING 1
struct xenpmu {
	/* Shared page between hypervisor and domain */
	struct xen_pmu_data *xenpmu_data;

	uint8_t flags;
};
static DEFINE_PER_CPU(struct xenpmu, xenpmu_shared);
#define get_xenpmu_data() (this_cpu_ptr(&xenpmu_shared)->xenpmu_data)
#define get_xenpmu_flags() (this_cpu_ptr(&xenpmu_shared)->flags)
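
/*
 * XENPMU_IRQ_PROCESSING is the only flag used so far.  It is set for
 * the duration of xen_pmu_irq_handler() below; the MSR and PMC
 * emulation paths check it to decide whether the shared-page image of
 * the PMU context is current and may be read or written in place.
 */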

/* Macro for computing address of a PMU MSR bank */
#define field_offset(ctxt, field) ((void *)((uintptr_t)ctxt + \
					    (uintptr_t)ctxt->field))
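
/*
 * The Xen PMU context structures store each MSR bank as a byte offset
 * from the start of the context rather than as a pointer, so a bank's
 * address is "context base + offset".  A minimal sketch of what the
 * macro expands to for the AMD counter bank:
 *
 *	struct xen_pmu_amd_ctxt *ctxt = &xenpmu_data->pmu.c.amd;
 *	uint64_t *counters = (void *)((uintptr_t)ctxt +
 *				      (uintptr_t)ctxt->counters);
 */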

/* AMD PMU */
#define F15H_NUM_COUNTERS 6
#define F10H_NUM_COUNTERS 4

static __read_mostly uint32_t amd_counters_base;
static __read_mostly uint32_t amd_ctrls_base;
static __read_mostly int amd_msr_step;
static __read_mostly int k7_counters_mirrored;
static __read_mostly int amd_num_counters;

/* Intel PMU */
#define MSR_TYPE_COUNTER 0
#define MSR_TYPE_CTRL 1
#define MSR_TYPE_GLOBAL 2
#define MSR_TYPE_ARCH_COUNTER 3
#define MSR_TYPE_ARCH_CTRL 4

/* Number of general pmu registers (CPUID.EAX[0xa].EAX[8..15]) */
#define PMU_GENERAL_NR_SHIFT 8
#define PMU_GENERAL_NR_BITS 8
#define PMU_GENERAL_NR_MASK (((1 << PMU_GENERAL_NR_BITS) - 1) \
			     << PMU_GENERAL_NR_SHIFT)

/* Number of fixed pmu registers (CPUID.EDX[0xa].EDX[0..4]) */
#define PMU_FIXED_NR_SHIFT 0
#define PMU_FIXED_NR_BITS 5
#define PMU_FIXED_NR_MASK (((1 << PMU_FIXED_NR_BITS) - 1) \
			   << PMU_FIXED_NR_SHIFT)
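
/*
 * Worked example with a hypothetical CPUID leaf 0xa result of
 * eax = 0x07300404 and edx = 0x00000603:
 *
 *	general: (0x07300404 & PMU_GENERAL_NR_MASK) >> 8 == 4 counters
 *	fixed:   (0x00000603 & PMU_FIXED_NR_MASK)   >> 0 == 3 counters
 */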

/* Alias registers (0x4c1) for full-width writes to PMCs */
#define MSR_PMC_ALIAS_MASK (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_PMC0))

#define INTEL_PMC_TYPE_SHIFT 30

static __read_mostly int intel_num_arch_counters, intel_num_fixed_counters;

static void xen_pmu_arch_init(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		switch (boot_cpu_data.x86) {
		case 0x15:
			amd_num_counters = F15H_NUM_COUNTERS;
			amd_counters_base = MSR_F15H_PERF_CTR;
			amd_ctrls_base = MSR_F15H_PERF_CTL;
			amd_msr_step = 2;
			k7_counters_mirrored = 1;
			break;
		case 0x10:
		case 0x12:
		case 0x14:
		case 0x16:
		default:
			amd_num_counters = F10H_NUM_COUNTERS;
			amd_counters_base = MSR_K7_PERFCTR0;
			amd_ctrls_base = MSR_K7_EVNTSEL0;
			amd_msr_step = 1;
			k7_counters_mirrored = 0;
			break;
		}
	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		amd_num_counters = F10H_NUM_COUNTERS;
		amd_counters_base = MSR_K7_PERFCTR0;
		amd_ctrls_base = MSR_K7_EVNTSEL0;
		amd_msr_step = 1;
		k7_counters_mirrored = 0;
	} else {
		uint32_t eax, ebx, ecx, edx;

		cpuid(0xa, &eax, &ebx, &ecx, &edx);

		intel_num_arch_counters = (eax & PMU_GENERAL_NR_MASK) >>
			PMU_GENERAL_NR_SHIFT;
		intel_num_fixed_counters = (edx & PMU_FIXED_NR_MASK) >>
			PMU_FIXED_NR_SHIFT;
	}
}

static inline uint32_t get_fam15h_addr(u32 addr)
{
	switch (addr) {
	case MSR_K7_PERFCTR0:
	case MSR_K7_PERFCTR1:
	case MSR_K7_PERFCTR2:
	case MSR_K7_PERFCTR3:
		return MSR_F15H_PERF_CTR + (addr - MSR_K7_PERFCTR0);
	case MSR_K7_EVNTSEL0:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_EVNTSEL3:
		return MSR_F15H_PERF_CTL + (addr - MSR_K7_EVNTSEL0);
	default:
		break;
	}

	return addr;
}
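
/*
 * Example of the translation above, using the architectural MSR
 * numbers (MSR_K7_PERFCTR0 is 0xc0010004, MSR_F15H_PERF_CTR is
 * 0xc0010201): get_fam15h_addr(MSR_K7_PERFCTR2) computes
 * 0xc0010201 + (0xc0010006 - 0xc0010004) == 0xc0010203.
 */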

static inline bool is_amd_pmu_msr(unsigned int msr)
{
	if ((msr >= MSR_F15H_PERF_CTL &&
	     msr < MSR_F15H_PERF_CTR + (amd_num_counters * 2)) ||
	    (msr >= MSR_K7_EVNTSEL0 &&
	     msr < MSR_K7_PERFCTR0 + amd_num_counters))
		return true;

	return false;
}
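
/*
 * The two ranges above cover both AMD layouts: the family 15h bank
 * starting at MSR_F15H_PERF_CTL (0xc0010200), where control and
 * counter MSRs interleave (hence the "* 2"), and the legacy K7 bank,
 * where the span from MSR_K7_EVNTSEL0 (0xc0010000) up to
 * MSR_K7_PERFCTR0 (0xc0010004) plus the counter count takes in both
 * the event-select and the counter MSRs.
 */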

static int is_intel_pmu_msr(u32 msr_index, int *type, int *index)
{
	u32 msr_index_pmc;

	switch (msr_index) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_IA32_DS_AREA:
	case MSR_IA32_PEBS_ENABLE:
		*type = MSR_TYPE_CTRL;
		return true;

	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*type = MSR_TYPE_GLOBAL;
		return true;

	default:

		if ((msr_index >= MSR_CORE_PERF_FIXED_CTR0) &&
		    (msr_index < MSR_CORE_PERF_FIXED_CTR0 +
				 intel_num_fixed_counters)) {
			*index = msr_index - MSR_CORE_PERF_FIXED_CTR0;
			*type = MSR_TYPE_COUNTER;
			return true;
		}

		if ((msr_index >= MSR_P6_EVNTSEL0) &&
		    (msr_index < MSR_P6_EVNTSEL0 + intel_num_arch_counters)) {
			*index = msr_index - MSR_P6_EVNTSEL0;
			*type = MSR_TYPE_ARCH_CTRL;
			return true;
		}

		msr_index_pmc = msr_index & MSR_PMC_ALIAS_MASK;
		if ((msr_index_pmc >= MSR_IA32_PERFCTR0) &&
		    (msr_index_pmc < MSR_IA32_PERFCTR0 +
				     intel_num_arch_counters)) {
			*type = MSR_TYPE_ARCH_COUNTER;
			*index = msr_index_pmc - MSR_IA32_PERFCTR0;
			return true;
		}
		return false;
	}
}
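
/*
 * A note on the MSR_PMC_ALIAS_MASK step above: MSR_IA32_PERFCTR0 is
 * 0xc1 and its full-width alias MSR_IA32_PMC0 is 0x4c1, so the mask
 * clears bit 10.  A full-width access such as 0x4c1 is thereby folded
 * onto the legacy counter address 0xc1 before the range check, and
 * both forms report the same type and index.
 */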

static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type,
				  int index, bool is_read)
{
	uint64_t *reg = NULL;
	struct xen_pmu_intel_ctxt *ctxt;
	uint64_t *fix_counters;
	struct xen_pmu_cntr_pair *arch_cntr_pair;
	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
	uint8_t xenpmu_flags = get_xenpmu_flags();

	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING))
		return false;

	ctxt = &xenpmu_data->pmu.c.intel;

	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		reg = &ctxt->global_ovf_ctrl;
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		reg = &ctxt->global_status;
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		reg = &ctxt->global_ctrl;
		break;
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		reg = &ctxt->fixed_ctrl;
		break;
	default:
		switch (type) {
		case MSR_TYPE_COUNTER:
			fix_counters = field_offset(ctxt, fixed_counters);
			reg = &fix_counters[index];
			break;
		case MSR_TYPE_ARCH_COUNTER:
			arch_cntr_pair = field_offset(ctxt, arch_counters);
			reg = &arch_cntr_pair[index].counter;
			break;
		case MSR_TYPE_ARCH_CTRL:
			arch_cntr_pair = field_offset(ctxt, arch_counters);
			reg = &arch_cntr_pair[index].control;
			break;
		default:
			return false;
		}
	}

	if (reg) {
		if (is_read)
			*val = *reg;
		else {
			*reg = *val;

			if (msr == MSR_CORE_PERF_GLOBAL_OVF_CTRL)
				ctxt->global_status &= (~(*val));
		}
		return true;
	}

	return false;
}

static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
{
	uint64_t *reg = NULL;
	int i, off = 0;
	struct xen_pmu_amd_ctxt *ctxt;
	uint64_t *counter_regs, *ctrl_regs;
	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
	uint8_t xenpmu_flags = get_xenpmu_flags();

	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING))
		return false;

	if (k7_counters_mirrored &&
	    ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)))
		msr = get_fam15h_addr(msr);

	ctxt = &xenpmu_data->pmu.c.amd;
	for (i = 0; i < amd_num_counters; i++) {
		if (msr == amd_ctrls_base + off) {
			ctrl_regs = field_offset(ctxt, ctrls);
			reg = &ctrl_regs[i];
			break;
		} else if (msr == amd_counters_base + off) {
			counter_regs = field_offset(ctxt, counters);
			reg = &counter_regs[i];
			break;
		}
		off += amd_msr_step;
	}

	if (reg) {
		if (is_read)
			*val = *reg;
		else
			*reg = *val;

		return true;
	}
	return false;
}
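
/*
 * amd_msr_step encodes the bank spacing walked above: 1 for the flat
 * K7 layout (consecutive MSRs) and 2 for the interleaved family 15h
 * CTL/CTR pairs, so "base + i * step" names the i-th register of the
 * right kind in either layout.
 */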

bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		if (is_amd_pmu_msr(msr)) {
			if (!xen_amd_pmu_emulate(msr, val, true))
				*val = native_read_msr_safe(msr, err);
			return true;
		}
	} else {
		int type, index;

		if (is_intel_pmu_msr(msr, &type, &index)) {
			if (!xen_intel_pmu_emulate(msr, val, type, index,
						   true))
				*val = native_read_msr_safe(msr, err);
			return true;
		}
	}

	return false;
}
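
/*
 * A minimal sketch of a call site (hypothetical, for illustration;
 * the real callers live in the pvops MSR code): offer the access to
 * the PMU emulation first and treat a false return as "not a PMU
 * MSR", falling through to a normal safe MSR read:
 *
 *	uint64_t val;
 *	int err = 0;
 *
 *	if (!pmu_msr_read(msr, &val, &err))
 *		val = native_read_msr_safe(msr, &err);
 */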

bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
{
	uint64_t val = ((uint64_t)high << 32) | low;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		if (is_amd_pmu_msr(msr)) {
			if (!xen_amd_pmu_emulate(msr, &val, false))
				*err = native_write_msr_safe(msr, low, high);
			return true;
		}
	} else {
		int type, index;

		if (is_intel_pmu_msr(msr, &type, &index)) {
			if (!xen_intel_pmu_emulate(msr, &val, type, index,
						   false))
				*err = native_write_msr_safe(msr, low, high);
			return true;
		}
	}

	return false;
}
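
/*
 * As with pmu_msr_read(), the boolean result means "this was a PMU
 * MSR and the access has been handled", not that the write succeeded:
 * *err is only set when the access falls through to the real MSR, and
 * is left untouched when the value is emulated in the shared page.
 */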

static unsigned long long xen_amd_read_pmc(int counter)
{
	struct xen_pmu_amd_ctxt *ctxt;
	uint64_t *counter_regs;
	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
	uint8_t xenpmu_flags = get_xenpmu_flags();

	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
		uint32_t msr;
		int err;

		msr = amd_counters_base + (counter * amd_msr_step);
		return native_read_msr_safe(msr, &err);
	}

	ctxt = &xenpmu_data->pmu.c.amd;
	counter_regs = field_offset(ctxt, counters);
	return counter_regs[counter];
}

static unsigned long long xen_intel_read_pmc(int counter)
{
	struct xen_pmu_intel_ctxt *ctxt;
	uint64_t *fixed_counters;
	struct xen_pmu_cntr_pair *arch_cntr_pair;
	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
	uint8_t xenpmu_flags = get_xenpmu_flags();

	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
		uint32_t msr;
		int err;

		if (counter & (1 << INTEL_PMC_TYPE_SHIFT))
			msr = MSR_CORE_PERF_FIXED_CTR0 + (counter & 0xffff);
		else
			msr = MSR_IA32_PERFCTR0 + counter;

		return native_read_msr_safe(msr, &err);
	}

	ctxt = &xenpmu_data->pmu.c.intel;
	if (counter & (1 << INTEL_PMC_TYPE_SHIFT)) {
		fixed_counters = field_offset(ctxt, fixed_counters);
		return fixed_counters[counter & 0xffff];
	}

	arch_cntr_pair = field_offset(ctxt, arch_counters);
	return arch_cntr_pair[counter].counter;
}
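
/*
 * The counter encoding decoded above follows the rdpmc convention:
 * bit 30 (INTEL_PMC_TYPE_SHIFT) selects the fixed-function counter
 * space and the low bits give the index.  For example, a counter
 * argument of ((1 << 30) | 1) names fixed counter 1, while a plain 0
 * names architectural counter 0.
 */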

unsigned long long xen_read_pmc(int counter)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return xen_amd_read_pmc(counter);
	else
		return xen_intel_read_pmc(counter);
}

int pmu_apic_update(uint32_t val)
{
	int ret;
	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();

	if (!xenpmu_data) {
		pr_warn_once("%s: pmudata not initialized\n", __func__);
		return -EINVAL;
	}

	xenpmu_data->pmu.l.lapic_lvtpc = val;

	if (get_xenpmu_flags() & XENPMU_IRQ_PROCESSING)
		return 0;

	ret = HYPERVISOR_xenpmu_op(XENPMU_lvtpc_set, NULL);

	return ret;
}
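
/*
 * The new LVTPC value travels through the shared page (the
 * pmu.l.lapic_lvtpc store above), which is presumably why the
 * hypercall can take NULL here.  While a PMU interrupt is being
 * processed the hypercall is skipped; the XENPMU_flush at the end of
 * xen_pmu_irq_handler() pushes the cached context back instead.
 */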

/* perf callbacks */
static int xen_is_in_guest(void)
{
	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();

	if (!xenpmu_data) {
		pr_warn_once("%s: pmudata not initialized\n", __func__);
		return 0;
	}

	if (!xen_initial_domain() || (xenpmu_data->domain_id >= DOMID_SELF))
		return 0;

	return 1;
}

static int xen_is_user_mode(void)
{
	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();

	if (!xenpmu_data) {
		pr_warn_once("%s: pmudata not initialized\n", __func__);
		return 0;
	}

	if (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_PV)
		return (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_USER);
	else
		return !!(xenpmu_data->pmu.r.regs.cpl & 3);
}

static unsigned long xen_get_guest_ip(void)
{
	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();

	if (!xenpmu_data) {
		pr_warn_once("%s: pmudata not initialized\n", __func__);
		return 0;
	}

	return xenpmu_data->pmu.r.regs.ip;
}

static struct perf_guest_info_callbacks xen_guest_cbs = {
	.is_in_guest = xen_is_in_guest,
	.is_user_mode = xen_is_user_mode,
	.get_guest_ip = xen_get_guest_ip,
};

/* Convert registers from Xen's format to Linux's */
static void xen_convert_regs(const struct xen_pmu_regs *xen_regs,
			     struct pt_regs *regs, uint64_t pmu_flags)
{
	regs->ip = xen_regs->ip;
	regs->cs = xen_regs->cs;
	regs->sp = xen_regs->sp;

	if (pmu_flags & PMU_SAMPLE_PV) {
		if (pmu_flags & PMU_SAMPLE_USER)
			regs->cs |= 3;
		else
			regs->cs &= ~3;
	} else {
		if (xen_regs->cpl)
			regs->cs |= 3;
		else
			regs->cs &= ~3;
	}
}
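
/*
 * Only the low two bits of %cs (the privilege level) are adjusted
 * here: perf derives user vs. kernel mode for a sample from them, so
 * PV samples fold in the PMU_SAMPLE_USER flag while HVM samples use
 * the CPL that Xen reported in the register snapshot.
 */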

irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
{
	int err, ret = IRQ_NONE;
	struct pt_regs regs = {0};
	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
	uint8_t xenpmu_flags = get_xenpmu_flags();

	if (!xenpmu_data) {
		pr_warn_once("%s: pmudata not initialized\n", __func__);
		return ret;
	}

	this_cpu_ptr(&xenpmu_shared)->flags =
		xenpmu_flags | XENPMU_IRQ_PROCESSING;
	xen_convert_regs(&xenpmu_data->pmu.r.regs, &regs,
			 xenpmu_data->pmu.pmu_flags);
	if (x86_pmu.handle_irq(&regs))
		ret = IRQ_HANDLED;

	/* Write out cached context to HW */
	err = HYPERVISOR_xenpmu_op(XENPMU_flush, NULL);
	this_cpu_ptr(&xenpmu_shared)->flags = xenpmu_flags;
	if (err) {
		pr_warn_once("%s: failed hypercall, err: %d\n", __func__, err);
		return IRQ_NONE;
	}

	return ret;
}
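
/*
 * The handler runs with XENPMU_IRQ_PROCESSING set so that any MSR or
 * PMC access made from x86_pmu.handle_irq() is served from the
 * shared-page context instead of touching hardware; the XENPMU_flush
 * hypercall then writes the (possibly modified) cached context back
 * before the previous flags are restored.
 */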

bool is_xen_pmu;

void xen_pmu_init(int cpu)
{
	int err;
	struct xen_pmu_params xp;
	unsigned long pfn;
	struct xen_pmu_data *xenpmu_data;

	BUILD_BUG_ON(sizeof(struct xen_pmu_data) > PAGE_SIZE);

	if (xen_hvm_domain() || (cpu != 0 && !is_xen_pmu))
		return;

	xenpmu_data = (struct xen_pmu_data *)get_zeroed_page(GFP_KERNEL);
	if (!xenpmu_data) {
		pr_err("VPMU init: No memory\n");
		return;
	}
	pfn = virt_to_pfn(xenpmu_data);

	xp.val = pfn_to_mfn(pfn);
	xp.vcpu = cpu;
	xp.version.maj = XENPMU_VER_MAJ;
	xp.version.min = XENPMU_VER_MIN;
	err = HYPERVISOR_xenpmu_op(XENPMU_init, &xp);
	if (err)
		goto fail;

	per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data;
	per_cpu(xenpmu_shared, cpu).flags = 0;

	if (!is_xen_pmu) {
		is_xen_pmu = true;
		perf_register_guest_info_callbacks(&xen_guest_cbs);
		xen_pmu_arch_init();
	}

	return;

fail:
	if (err == -EOPNOTSUPP || err == -ENOSYS)
		pr_info_once("VPMU disabled by hypervisor.\n");
	else
		pr_info_once("Could not initialize VPMU for cpu %d, error %d\n",
			     cpu, err);
	free_pages((unsigned long)xenpmu_data, 0);
}
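
/*
 * Teardown mirrors xen_pmu_init(): the hypervisor is told to stop
 * using the per-vCPU shared page (XENPMU_finish) before the page is
 * freed, so Xen is never left holding a reference to memory the
 * kernel has returned to the allocator.
 */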

void xen_pmu_finish(int cpu)
{
	struct xen_pmu_params xp;

	if (xen_hvm_domain())
		return;

	xp.vcpu = cpu;
	xp.version.maj = XENPMU_VER_MAJ;
	xp.version.min = XENPMU_VER_MIN;

	(void)HYPERVISOR_xenpmu_op(XENPMU_finish, &xp);

	free_pages((unsigned long)per_cpu(xenpmu_shared, cpu).xenpmu_data, 0);
	per_cpu(xenpmu_shared, cpu).xenpmu_data = NULL;
}