// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on pmu_intel.c.
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

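/*
 * Each supported event-select/counter MSR pair maps onto one slot in
 * pmu->gp_counters[]; msr_to_index() below performs that translation.
 */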
enum index {
	INDEX_ZERO = 0,
	INDEX_ONE,
	INDEX_TWO,
	INDEX_THREE,
	INDEX_FOUR,
	INDEX_FIVE,
	INDEX_ERROR,
};

/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

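/*
 * Pick the base MSR for the requested register type: guests with
 * PERFCTR_CORE use the extended MSR_F15H_PERF_CTL/CTR range, legacy
 * guests use the original K7 MSRs.
 */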
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		if (type == PMU_TYPE_COUNTER)
			return MSR_F15H_PERF_CTR;
		else
			return MSR_F15H_PERF_CTL;
	} else {
		if (type == PMU_TYPE_COUNTER)
			return MSR_K7_PERFCTR0;
		else
			return MSR_K7_EVNTSEL0;
	}
}

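/*
 * The legacy K7 range only covers four counter pairs, so INDEX_FOUR and
 * INDEX_FIVE are reachable only through the MSR_F15H_PERF_* range.
 */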
static enum index msr_to_index(u32 msr)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTR0:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
		return INDEX_ZERO;
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTR1:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_PERFCTR1:
		return INDEX_ONE;
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTR2:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_PERFCTR2:
		return INDEX_TWO;
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTR3:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR3:
		return INDEX_THREE;
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTR4:
		return INDEX_FOUR;
	case MSR_F15H_PERF_CTL5:
	case MSR_F15H_PERF_CTR5:
		return INDEX_FIVE;
	default:
		return INDEX_ERROR;
	}
}

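/*
 * Translate an MSR into its backing kvm_pmc, but only if the MSR's class
 * (counter vs. event select) matches the requested type; any other MSR
 * yields NULL.
 */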
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTL5:
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		break;
	case MSR_F15H_PERF_CTR0:
	case MSR_F15H_PERF_CTR1:
	case MSR_F15H_PERF_CTR2:
	case MSR_F15H_PERF_CTR3:
	case MSR_F15H_PERF_CTR4:
	case MSR_F15H_PERF_CTR5:
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		break;
	default:
		return NULL;
	}

	return &pmu->gp_counters[msr_to_index(msr)];
}

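/*
 * Map the guest's event select and unit mask onto a generic perf hardware
 * event id; PERF_COUNT_HW_MAX signals that no architectural event matches.
 */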
static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	int i;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (amd_event_mapping[i].eventsel == event_select &&
		    amd_event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return amd_event_mapping[i].event_type;
}

/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
	return PERF_COUNT_HW_MAX;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 * Because AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled
 * (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

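/*
 * With PERFCTR_CORE the counter MSRs are spaced two apart (CTL/CTR pairs),
 * so counter idx N lives at MSR_F15H_PERF_CTR + 2 * N; e.g. idx 2 maps to
 * MSR_F15H_PERF_CTR2.
 */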
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The idx is contiguous. The MSRs are not. The counter MSRs
		 * are interleaved with the event select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

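	/* Bits 31:30 of ECX aren't part of the counter index. */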
	idx &= ~(3u << 30);

	return (idx >= pmu->nr_arch_gp_counters);
}

/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
	return false;
}

static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}

static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}

static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
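		/*
		 * Add the delta so that pmc_read_counter() now returns the
		 * written value without reprogramming the backing perf event.
		 */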
		pmc->counter += data - pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		if (data == pmc->eventsel)
			return 0;
		if (!(data & pmu->reserved_bits)) {
			reprogram_gp_counter(pmc, data);
			return 0;
		}
	}

	return 1;
}

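/*
 * Recompute the guest-visible PMU configuration from guest CPUID:
 * PERFCTR_CORE exposes six 48-bit counters, legacy guests get four.
 */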
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->version = 1;
	/* not applicable to AMD; clear them to prevent any fallout */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

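/*
 * Set up every possible GP counter up front; refresh() limits how many of
 * them the guest can actually see via nr_arch_gp_counters.
 */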
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
}

static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

struct kvm_pmu_ops amd_pmu_ops = {
	.pmc_perf_hw_id = amd_pmc_perf_hw_id,
	.find_fixed_event = amd_find_fixed_event,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};