^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright 2019 Arm Limited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Author: Andrew Murray <Andrew.Murray@arm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/kvm_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/perf_event.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <asm/kvm_hyp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * Given the perf event attributes and system type, determine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * if we are going to need to switch counters at guest entry/exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) */
static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
{
	/*
	 * With VHE the guest kernel runs at EL1 and the host at EL2,
	 * where user (EL0) is excluded then we have no reason to switch
	 * counters.
	 */
	if (has_vhe() && attr->exclude_user)
		return false;

	/* Only switch if attributes are different */
	return (attr->exclude_host != attr->exclude_guest);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * Add events to track that we may want to switch at guest entry/exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) if (!kvm_arm_support_pmu_v3() || !ctx || !kvm_pmu_switch_needed(attr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) if (!attr->exclude_host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) ctx->pmu_events.events_host |= set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) if (!attr->exclude_guest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) ctx->pmu_events.events_guest |= set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * Stop tracking events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) void kvm_clr_pmu_events(u32 clr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) if (!kvm_arm_support_pmu_v3() || !ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) ctx->pmu_events.events_host &= ~clr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) ctx->pmu_events.events_guest &= ~clr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
/* Expand to one switch case that reads PMEVTYPER<idx>_EL0 directly. */
#define PMEVTYPER_READ_CASE(idx) \
	case idx: \
		return read_sysreg(pmevtyper##idx##_el0)

/* Expand to one switch case that writes 'val' to PMEVTYPER<idx>_EL0 directly. */
#define PMEVTYPER_WRITE_CASE(idx) \
	case idx: \
		write_sysreg(val, pmevtyper##idx##_el0); \
		break

/*
 * Emit the switch cases for all 31 event counters (indices 0-30);
 * 'readwrite' is READ or WRITE, selecting one of the accessor case
 * macros above. The sysreg name must be a compile-time token, hence
 * one case per counter rather than a computed access.
 */
#define PMEVTYPER_CASES(readwrite) \
	PMEVTYPER_##readwrite##_CASE(0); \
	PMEVTYPER_##readwrite##_CASE(1); \
	PMEVTYPER_##readwrite##_CASE(2); \
	PMEVTYPER_##readwrite##_CASE(3); \
	PMEVTYPER_##readwrite##_CASE(4); \
	PMEVTYPER_##readwrite##_CASE(5); \
	PMEVTYPER_##readwrite##_CASE(6); \
	PMEVTYPER_##readwrite##_CASE(7); \
	PMEVTYPER_##readwrite##_CASE(8); \
	PMEVTYPER_##readwrite##_CASE(9); \
	PMEVTYPER_##readwrite##_CASE(10); \
	PMEVTYPER_##readwrite##_CASE(11); \
	PMEVTYPER_##readwrite##_CASE(12); \
	PMEVTYPER_##readwrite##_CASE(13); \
	PMEVTYPER_##readwrite##_CASE(14); \
	PMEVTYPER_##readwrite##_CASE(15); \
	PMEVTYPER_##readwrite##_CASE(16); \
	PMEVTYPER_##readwrite##_CASE(17); \
	PMEVTYPER_##readwrite##_CASE(18); \
	PMEVTYPER_##readwrite##_CASE(19); \
	PMEVTYPER_##readwrite##_CASE(20); \
	PMEVTYPER_##readwrite##_CASE(21); \
	PMEVTYPER_##readwrite##_CASE(22); \
	PMEVTYPER_##readwrite##_CASE(23); \
	PMEVTYPER_##readwrite##_CASE(24); \
	PMEVTYPER_##readwrite##_CASE(25); \
	PMEVTYPER_##readwrite##_CASE(26); \
	PMEVTYPER_##readwrite##_CASE(27); \
	PMEVTYPER_##readwrite##_CASE(28); \
	PMEVTYPER_##readwrite##_CASE(29); \
	PMEVTYPER_##readwrite##_CASE(30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) * Read a value direct from PMEVTYPER<idx> where idx is 0-30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) */
static u64 kvm_vcpu_pmu_read_evtype_direct(int idx)
{
	switch (idx) {
	PMEVTYPER_CASES(READ);
	case ARMV8_PMU_CYCLE_IDX:
		/* The cycle counter's filter lives in PMCCFILTR_EL0 instead. */
		return read_sysreg(pmccfiltr_el0);
	default:
		/* Out-of-range counter index: warn and return a benign value. */
		WARN_ON(1);
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) * Write a value direct to PMEVTYPER<idx> where idx is 0-30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) switch (idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) PMEVTYPER_CASES(WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) case ARMV8_PMU_CYCLE_IDX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) write_sysreg(val, pmccfiltr_el0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) * Modify ARMv8 PMU events to include EL0 counting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) static void kvm_vcpu_pmu_enable_el0(unsigned long events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) u64 typer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) u32 counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) for_each_set_bit(counter, &events, 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) typer = kvm_vcpu_pmu_read_evtype_direct(counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) typer &= ~ARMV8_PMU_EXCLUDE_EL0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) kvm_vcpu_pmu_write_evtype_direct(counter, typer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) * Modify ARMv8 PMU events to exclude EL0 counting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) static void kvm_vcpu_pmu_disable_el0(unsigned long events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) u64 typer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) u32 counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) for_each_set_bit(counter, &events, 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) typer = kvm_vcpu_pmu_read_evtype_direct(counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) typer |= ARMV8_PMU_EXCLUDE_EL0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) kvm_vcpu_pmu_write_evtype_direct(counter, typer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) * On VHE ensure that only guest events have EL0 counting enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) * This is called from both vcpu_{load,put} and the sysreg handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) * Since the latter is preemptible, special care must be taken to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) * disable preemption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) */
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_host_data *host;
	u32 events_guest, events_host;

	/* This switching is only performed with PMUv3 on VHE systems. */
	if (!kvm_arm_support_pmu_v3() || !has_vhe())
		return;

	/*
	 * Callers may be preemptible: keep the per-CPU mask reads and
	 * the resulting sysreg updates on the same CPU.
	 */
	preempt_disable();
	host = this_cpu_ptr_hyp_sym(kvm_host_data);
	events_guest = host->pmu_events.events_guest;
	events_host = host->pmu_events.events_host;

	/* Guest-side counters may count EL0; host-side counters must not. */
	kvm_vcpu_pmu_enable_el0(events_guest);
	kvm_vcpu_pmu_disable_el0(events_host);
	preempt_enable();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) * On VHE ensure that only host events have EL0 counting enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) struct kvm_host_data *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) u32 events_guest, events_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) if (!kvm_arm_support_pmu_v3() || !has_vhe())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) host = this_cpu_ptr_hyp_sym(kvm_host_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) events_guest = host->pmu_events.events_guest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) events_host = host->pmu_events.events_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) kvm_vcpu_pmu_enable_el0(events_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) kvm_vcpu_pmu_disable_el0(events_guest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) }