Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

Source: arch/arm64/kvm/pmu-emul.c, as of commit 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);

#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1

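/*
 * Width of the event-number field implemented by the guest's PMU:
 * ARMv8.0 defines a 10-bit event space, while ARMv8.1 and later
 * extend it to 16 bits.
 */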
static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
	switch (kvm->arch.pmuver) {
	case ID_AA64DFR0_PMUVER_8_0:
		return GENMASK(9, 0);
	case ID_AA64DFR0_PMUVER_8_1:
	case ID_AA64DFR0_PMUVER_8_4:
	case ID_AA64DFR0_PMUVER_8_5:
		return GENMASK(15, 0);
	default:		/* Shouldn't be here, just for sanity */
		WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver);
		return 0;
	}
}

/**
 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (select_idx == ARMV8_PMU_CYCLE_IDX &&
		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}

static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

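	/*
	 * pmc->idx is also the counter's position in the enclosing
	 * pmu->pmc[] array, so stepping the pointer back by idx lands on
	 * pmc[0]; container_of() can then recover the kvm_pmu, the
	 * kvm_vcpu_arch and finally the kvm_vcpu.
	 */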
	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

/**
 * kvm_pmu_pmc_is_chained - determine if the pmc is chained
 * @pmc: The PMU counter pointer
 */
static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

	return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}

/**
 * kvm_pmu_idx_is_high_counter - determine if select_idx is a high/low counter
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
{
	return select_idx & 0x1;
}

/**
 * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
 * @pmc: The PMU counter pointer
 *
 * When a pair of PMCs are chained together we use the low counter (canonical)
 * to hold the underlying perf event.
 */
static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
{
	if (kvm_pmu_pmc_is_chained(pmc) &&
	    kvm_pmu_idx_is_high_counter(pmc->idx))
		return pmc - 1;

	return pmc;
}

static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
{
	if (kvm_pmu_idx_is_high_counter(pmc->idx))
		return pmc - 1;
	else
		return pmc + 1;
}

/**
 * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 eventsel, reg;

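	/*
	 * The CHAIN event is always programmed on the odd (high) counter
	 * of a pair, so normalise to the odd index before checking.
	 */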
	select_idx |= 0x1;

	if (select_idx == ARMV8_PMU_CYCLE_IDX)
		return false;

	reg = PMEVTYPER0_EL0 + select_idx;
	eventsel = __vcpu_sys_reg(vcpu, reg) & kvm_pmu_event_mask(vcpu->kvm);

	return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
}

/**
 * kvm_pmu_get_pair_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 */
static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
					  struct kvm_pmc *pmc)
{
	u64 counter, counter_high, reg, enabled, running;

	if (kvm_pmu_pmc_is_chained(pmc)) {
		pmc = kvm_pmu_get_canonical_pmc(pmc);
		reg = PMEVCNTR0_EL0 + pmc->idx;

		counter = __vcpu_sys_reg(vcpu, reg);
		counter_high = __vcpu_sys_reg(vcpu, reg + 1);

		counter = lower_32_bits(counter) | (counter_high << 32);
	} else {
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
		counter = __vcpu_sys_reg(vcpu, reg);
	}

	/*
	 * The real counter value is the value of the counter register plus
	 * whatever the associated perf event has counted so far.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	return counter;
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_pmc_is_chained(pmc) &&
	    kvm_pmu_idx_is_high_counter(select_idx))
		counter = upper_32_bits(counter);
	else if (select_idx != ARMV8_PMU_CYCLE_IDX)
		counter = lower_32_bits(counter);

	return counter;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
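	/*
	 * Store only the delta against the live perf count: a subsequent
	 * kvm_pmu_get_counter_value() adds the perf event's count back on
	 * top, so the guest reads back exactly @val.
	 */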
	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);

	/* Recreate the perf event to reflect the updated sample_period */
	kvm_pmu_create_perf_event(vcpu, select_idx);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
	pmc = kvm_pmu_get_canonical_pmc(pmc);
	if (pmc->perf_event) {
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 counter, reg, val;

	pmc = kvm_pmu_get_canonical_pmc(pmc);
	if (!pmc->perf_event)
		return;

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
		reg = PMCCNTR_EL0;
		val = counter;
	} else {
		reg = PMEVCNTR0_EL0 + pmc->idx;
		val = lower_32_bits(counter);
	}

	__vcpu_sys_reg(vcpu, reg) = val;

	if (kvm_pmu_pmc_is_chained(pmc))
		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);

	kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int i;

	for_each_set_bit(i, &mask, 32)
		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);

	bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
}

/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_release_perf_event(&pmu->pmc[i]);
	irq_work_sync(&vcpu->arch.pmu.overflow_work);
}

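/*
 * PMCR_EL0.N holds the number of event counters implemented; the valid
 * counter mask is those N low bits plus bit 31 for the cycle counter,
 * which always exists.
 */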
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

		/* A change in the enable state may affect the chain state */
		kvm_pmu_update_pmc_chained(vcpu, i);
		kvm_pmu_create_perf_event(vcpu, i);

		/* At this point, pmc must be the canonical */
		if (pmc->perf_event) {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("fail to enable perf event\n");
		}
	}
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

		/* A change in the enable state may affect the chain state */
		kvm_pmu_update_pmc_chained(vcpu, i);
		kvm_pmu_create_perf_event(vcpu, i);

		/* At this point, pmc must be the canonical */
		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

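	/*
	 * A counter's overflow raises the interrupt line only while the PMU
	 * is globally enabled and both the counter (PMCNTENSET) and its
	 * overflow interrupt (PMINTENSET) are enabled.
	 */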
	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
		reg &= kvm_pmu_valid_counter_mask(vcpu);
	}

	return reg;
}

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU bit for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/*
 * When the perf interrupt is an NMI, we cannot safely notify the vcpu
 * corresponding to the event.
 * This is why we need a callback to do it once outside of the NMI context.
 */
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
{
	struct kvm_vcpu *vcpu;
	struct kvm_pmu *pmu;

	pmu = container_of(work, struct kvm_pmu, overflow_work);
	vcpu = kvm_pmc_to_vcpu(pmu->pmc);

	kvm_vcpu_kick(vcpu);
}

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;
	u64 period;

	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

	/*
	 * Reset the sample period to the architectural limit,
	 * i.e. the point where the counter overflows.
	 */
	period = -(local64_read(&perf_event->count));

	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
		period &= GENMASK(31, 0);

	local64_set(&perf_event->hw.period_left, 0);
	perf_event->attr.sample_period = period;
	perf_event->hw.sample_period = period;

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

	if (kvm_pmu_overflow_status(vcpu)) {
		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (!in_nmi())
			kvm_vcpu_kick(vcpu);
		else
			irq_work_queue(&vcpu->arch.pmu.overflow_work);
	}

	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int i;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
		return;

	/* Weed out disabled counters */
	val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

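	/*
	 * The cycle counter (index 31) cannot count SW_INC, so only the
	 * event counters below ARMV8_PMU_CYCLE_IDX are considered.
	 */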
	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
		u64 type, reg;

		if (!(val & BIT(i)))
			continue;

		/* PMSWINC only applies to ... SW_INC! */
		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
		type &= kvm_pmu_event_mask(vcpu->kvm);
		if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
			continue;

		/* increment this counter, which counts the SW_INC event */
		reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
		reg = lower_32_bits(reg);
		__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;

		if (reg) /* no overflow on the low part */
			continue;

		if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
			/* increment the high counter */
			reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
			reg = lower_32_bits(reg);
			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
			if (!reg) /* mark overflow on the high counter */
				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
		} else {
			/* mark overflow on low counter */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
		}
	}
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
	int i;

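	/*
	 * PMCR_EL0.E globally enables or disables all counters, .C resets
	 * the cycle counter and .P resets the event counters (the cycle
	 * counter is explicitly excluded from the .P reset below).
	 */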
	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter_mask(vcpu,
		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
	} else {
		kvm_pmu_disable_counter_mask(vcpu, mask);
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
		for_each_set_bit(i, &mask, 32)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}
}

static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 */
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter, reg, data;

	/*
	 * For chained counters the event type and filtering attributes are
	 * obtained from the low/even counter. We also use this counter to
	 * determine if the event is enabled/disabled.
	 */
	pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);

	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
	data = __vcpu_sys_reg(vcpu, reg);

	kvm_pmu_stop_counter(vcpu, pmc);
	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
	else
		eventsel = data & kvm_pmu_event_mask(vcpu->kvm);

	/* Software increment event doesn't need to be backed by a perf event */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR)
		return;

	/*
	 * If we have a filter in place and the event isn't allowed, do
	 * not install a perf event either.
	 */
	if (vcpu->kvm->arch.pmu_filter &&
	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = eventsel;

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_pmc_is_chained(pmc)) {
		/*
		 * The initial sample period (overflow count) of an event. For
		 * chained counters we only support overflow interrupts on the
		 * high counter.
		 */
		attr.sample_period = (-counter) & GENMASK(63, 0);
		attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;

		event = perf_event_create_kernel_counter(&attr, -1, current,
							 kvm_pmu_perf_overflow,
							 pmc + 1);
	} else {
		/* The initial sample period (overflow count) of an event. */
		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
			attr.sample_period = (-counter) & GENMASK(63, 0);
		else
			attr.sample_period = (-counter) & GENMASK(31, 0);

		event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);
	}

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

/**
 * kvm_pmu_update_pmc_chained - update chained bitmap
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 *
 * Update the chained bitmap based on the event type written in the
 * typer register and the enable state of the odd register.
 */
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
	bool new_state, old_state;

	old_state = kvm_pmu_pmc_is_chained(pmc);
	new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
		    kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);

	if (old_state == new_state)
		return;

	canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
	kvm_pmu_stop_counter(vcpu, canonical_pmc);
	if (new_state) {
		/*
		 * During promotion from !chained to chained we must ensure
		 * the adjacent counter is stopped and its event destroyed
		 */
		kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
		return;
	}
	clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event
 * API to emulate this action and create a kernel perf event for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 				    u64 select_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	u64 reg, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	mask  =  ARMV8_PMU_EVTYPE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	mask |= kvm_pmu_event_mask(vcpu->kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	__vcpu_sys_reg(vcpu, reg) = data & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	kvm_pmu_update_pmc_chained(vcpu, select_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	kvm_pmu_create_perf_event(vcpu, select_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) static int kvm_pmu_probe_pmuver(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	struct perf_event_attr attr = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	struct perf_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	struct arm_pmu *pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	int pmuver = 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	 * Create a dummy event that only counts user cycles. As we'll never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	 * leave this function with the event being live, it will never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	 * count anything. But it allows us to probe some of the PMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	 * details. Yes, this is terrible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	attr.type = PERF_TYPE_RAW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	attr.size = sizeof(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	attr.pinned = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	attr.disabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	attr.exclude_user = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	attr.exclude_kernel = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	attr.exclude_hv = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	attr.exclude_host = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	attr.sample_period = GENMASK(63, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	event = perf_event_create_kernel_counter(&attr, -1, current,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 						 kvm_pmu_perf_overflow, &attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	if (IS_ERR(event)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		pr_err_once("kvm: pmu event creation failed %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 			    PTR_ERR(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		return 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	if (event->pmu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		pmu = to_arm_pmu(event->pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		if (pmu->pmuver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 			pmuver = pmu->pmuver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	perf_event_disable(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	perf_event_release_kernel(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	return pmuver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) }
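
/*
 * Editor's note: the probed version is cached per-VM in kvm->arch.pmuver
 * (see kvm_arm_pmu_v3_set_attr() below), so the dummy event is created at
 * most once per VM, on the first PMU attribute access.
 */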
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	u64 val, mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	int base, i, nr_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	if (!pmceid1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		val = read_sysreg(pmceid0_el0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		val = read_sysreg(pmceid1_el0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		 * as RAZ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		base = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	if (!bmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
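	/*
	 * Editor's note: the filter bitmap is scanned one byte (8 events) at
	 * a time. Events 0x0000-0x003f are advertised in the low 32 bits of
	 * PMCEID{0,1}_EL0, while the ARMv8.1 "extended common" events that
	 * start at 0x4000 occupy the high 32 bits, hence the second lookup
	 * at offset 0x4000 + base + i.
	 */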
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	for (i = 0; i < 32; i += 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		u64 byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		byte = bitmap_get_value8(bmap, base + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		mask |= byte << i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		if (nr_events >= (0x4000 + base + 32)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 			byte = bitmap_get_value8(bmap, 0x4000 + base + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			mask |= byte << (32 + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	return val & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) }
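
/*
 * Editor's note, a worked example (not from the original source): if
 * userspace installs a DENY filter covering event 0x11
 * (ARMV8_PMUV3_PERFCTR_CPU_CYCLES), bit 17 of the mask built above ends
 * up clear, so the guest reads PMCEID0_EL0 with bit 17 clear and treats
 * the cycle event as not implemented, even though the host supports it.
 */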
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	if (!kvm_vcpu_has_pmu(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	if (!vcpu->arch.pmu.created)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	 * A valid interrupt configuration for the PMU is either to have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	 * properly configured interrupt number with an in-kernel irqchip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	 * or to have neither an in-kernel GIC nor an IRQ set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (irqchip_in_kernel(vcpu->kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		int irq = vcpu->arch.pmu.irq_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		 * If we are using an in-kernel vgic, at this point we know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		 * the vgic will be initialized, so we can check the PMU irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		 * number against the dimensions of the vgic and make sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		 * it's valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) }
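
/*
 * Editor's note, examples of the two valid shapes: with an in-kernel
 * GIC, userspace must have set KVM_ARM_VCPU_PMU_V3_IRQ to a PPI or to an
 * SPI the vgic can route before the vcpu is first run; with a userspace
 * irqchip, the IRQ attribute must never have been set, and the overflow
 * line level is instead reported out through kvm_run (KVM_ARM_DEV_PMU).
 */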
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	if (irqchip_in_kernel(vcpu->kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		 * If using the PMU with an in-kernel virtual GIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		 * implementation, we require the GIC to be already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		 * initialized when initializing the PMU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		if (!vgic_initialized(vcpu->kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		if (!kvm_arm_pmu_irq_initialized(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 			return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 					 &vcpu->arch.pmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
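	/*
	 * Editor's note: the perf overflow callback can fire in NMI context,
	 * where the vcpu cannot be kicked directly, so the kick is deferred
	 * to this irq_work (see kvm_pmu_perf_overflow_notify_vcpu()).
	 */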
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	init_irq_work(&vcpu->arch.pmu.overflow_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		      kvm_pmu_perf_overflow_notify_vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	vcpu->arch.pmu.created = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887)  * For one VM the interrupt type must be the same for each vcpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888)  * As a PPI, the interrupt number is the same for all vcpus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889)  * while as an SPI it must be a separate number per vcpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	kvm_for_each_vcpu(i, vcpu, kvm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		if (!kvm_arm_pmu_irq_initialized(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		if (irq_is_ppi(irq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 			if (vcpu->arch.pmu.irq_num != irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 				return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			if (vcpu->arch.pmu.irq_num == irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 				return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) }
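
/*
 * Editor's note, concrete cases: on a 4-vcpu VM, PPI 23 on every vcpu is
 * valid (same number everywhere), and SPIs 40/41/42/43, one per vcpu, are
 * valid (all distinct). Reusing one SPI for two vcpus, or requesting a
 * PPI different from one already configured, is rejected here.
 */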
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	if (!kvm_vcpu_has_pmu(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	if (vcpu->arch.pmu.created)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	if (!vcpu->kvm->arch.pmuver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		vcpu->kvm->arch.pmuver = kvm_pmu_probe_pmuver();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	if (vcpu->kvm->arch.pmuver == 0xf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		int __user *uaddr = (int __user *)(long)attr->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		if (!irqchip_in_kernel(vcpu->kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		if (get_user(irq, uaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		if (!pmu_irq_is_valid(vcpu->kvm, irq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		if (kvm_arm_pmu_irq_initialized(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 			return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		vcpu->arch.pmu.irq_num = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	case KVM_ARM_VCPU_PMU_V3_FILTER: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		struct kvm_pmu_event_filter __user *uaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		struct kvm_pmu_event_filter filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		int nr_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		if (copy_from_user(&filter, uaddr, sizeof(filter)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		if (((u32)filter.base_event + filter.nevents) > nr_events ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		    (filter.action != KVM_PMU_EVENT_ALLOW &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		     filter.action != KVM_PMU_EVENT_DENY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		mutex_lock(&vcpu->kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		if (!vcpu->kvm->arch.pmu_filter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 			vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 			if (!vcpu->kvm->arch.pmu_filter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 				mutex_unlock(&vcpu->kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			 * The default depends on the first applied filter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			 * If it allows events, the default is to deny.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			 * Conversely, if the first filter denies a set of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 			 * events, the default is to allow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 			if (filter.action == KVM_PMU_EVENT_ALLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 				bitmap_zero(vcpu->kvm->arch.pmu_filter, nr_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 				bitmap_fill(vcpu->kvm->arch.pmu_filter, nr_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		}
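
		/*
		 * Editor's note, an example of the first-filter default: if
		 * the very first filter is { .base_event = 0x11, .nevents =
		 * 1, .action = KVM_PMU_EVENT_ALLOW }, the bitmap starts out
		 * all-zero (deny everything) and only bit 0x11 is then set,
		 * so the guest may count CPU cycles and nothing else.
		 */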
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		if (filter.action == KVM_PMU_EVENT_ALLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 			bitmap_set(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 			bitmap_clear(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		mutex_unlock(&vcpu->kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	case KVM_ARM_VCPU_PMU_V3_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		return kvm_arm_pmu_v3_init(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
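
/*
 * Editor's usage sketch (hypothetical userspace code, not part of this
 * file): the attributes above are driven through the vcpu device-attr
 * ioctl in group KVM_ARM_VCPU_PMU_V3_CTRL. Error handling omitted.
 *
 *	int irq = 23;					// a PPI
 *	struct kvm_pmu_event_filter filter = {
 *		.base_event	= 0x11,			// CPU_CYCLES
 *		.nevents	= 1,
 *		.action		= KVM_PMU_EVENT_ALLOW,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
 *		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
 *		.addr	= (__u64)(unsigned long)&irq,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);	// set overflow IRQ
 *	attr.attr = KVM_ARM_VCPU_PMU_V3_FILTER;
 *	attr.addr = (__u64)(unsigned long)&filter;
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);	// install a filter
 *	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
 *	attr.addr = 0;
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);	// finalize the PMU
 *
 * Ordering matters: every SET returns -EBUSY once INIT has succeeded
 * (pmu.created is set), and INIT itself fails with -ENXIO if no IRQ was
 * configured while an in-kernel irqchip is present.
 */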
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		int __user *uaddr = (int __user *)(long)attr->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		if (!irqchip_in_kernel(vcpu->kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		if (!kvm_vcpu_has_pmu(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		if (!kvm_arm_pmu_irq_initialized(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		irq = vcpu->arch.pmu.irq_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		return put_user(irq, uaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	case KVM_ARM_VCPU_PMU_V3_IRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	case KVM_ARM_VCPU_PMU_V3_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	case KVM_ARM_VCPU_PMU_V3_FILTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		if (kvm_vcpu_has_pmu(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }