Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

arch/x86/kvm/pmu.h (blame: every line last touched in commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
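/*
 * IA32_FIXED_CTR_CTRL dedicates one 4-bit field to each fixed counter:
 * bits 1:0 select the enable ring levels (OS/USR), bit 2 is AnyThread and
 * bit 3 raises a PMI on overflow. For example, fixed_ctrl_field(ctrl, 1)
 * extracts bits 7:4 of the MSR, i.e. the field of fixed counter 1.
 */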

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002
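/*
 * Pseudo-counter indices for the VMware backdoor: when KVM's VMware
 * backdoor emulation is enabled, a guest RDPMC with one of these indices
 * is serviced with host time values rather than a real PMC (see
 * is_vmware_backdoor_pmc() and kvm_pmu_rdpmc() declared below).
 */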

#define MAX_FIXED_COUNTERS	3

struct kvm_event_hw_type_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
};

struct kvm_pmu_ops {
	unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
	unsigned (*find_fixed_event)(int idx);
	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
		unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	int (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
};
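/*
 * Vendor-specific vPMU callbacks, implemented by intel_pmu_ops and
 * amd_pmu_ops (declared at the bottom of this header) and dispatched
 * through kvm_x86_ops.pmu_ops, as pmc_is_enabled() below demonstrates.
 */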

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}
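/*
 * counter_bitmask[] holds the width mask per counter type (KVM_PMC_GP or
 * KVM_PMC_FIXED), set up from guest CPUID by the vendor refresh() hook;
 * e.g. a 48-bit counter uses (1ULL << 48) - 1 = 0xFFFFFFFFFFFF.
 */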

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}
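/*
 * The guest-visible value is the snapshot saved in pmc->counter plus the
 * delta accumulated by the backing host perf event since the counter was
 * last (re)programmed, truncated to the architected counter width.
 */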

static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
		pmc->current_config = 0;
		pmc_to_pmu(pmc)->event_count--;
	}
}
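/*
 * event_count tracks how many host perf events the vPMU currently owns;
 * kvm_pmu_cleanup() uses it to lazily release events that went unused.
 */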

static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		pmc_release_perf_event(pmc);
	}
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
	return kvm_x86_ops.pmu_ops->pmc_is_enabled(pmc);
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
						 u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}
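/*
 * global_ctrl_mask has a 1 in every bit that is reserved in the guest's
 * IA32_PERF_GLOBAL_CTRL, so a write is valid iff it sets none of them.
 * Illustrative example: with 8 GP and 3 fixed counters, bits 7:0 and
 * 34:32 are writable and the mask is ~(0xFFULL | (0x7ULL << 32)).
 */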

/* returns general purpose PMC with the specified MSR. Note that it can be
 * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}
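/*
 * array_index_nospec() (from <linux/nospec.h>) clamps the index even
 * under speculative execution, so a mispredicted bounds check cannot be
 * used to leak memory beyond gp_counters[] (Spectre v1 hardening).
 * get_fixed_pmc() below uses the same pattern.
 */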

/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}

static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

	if (!sample_period)
		sample_period = pmc_bitmask(pmc) + 1;
	return sample_period;
}
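/*
 * A guest PMC interrupts on overflow, so the host perf event is given a
 * sample period equal to the remaining distance to overflow, computed in
 * two's complement modulo the counter width. Worked example for a 48-bit
 * counter: counter_value = 0xFFFFFFFFFFF0 gives
 * (-0xFFFFFFFFFFF0) & 0xFFFFFFFFFFFF = 0x10, i.e. 16 events to overflow.
 * A counter_value of 0 negates back to 0, hence the fallback to the full
 * period pmc_bitmask(pmc) + 1.
 */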

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */