// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

#define reg_to_encoding(x)						\
	sys_reg((u32)(x)->Op0, (u32)(x)->Op1,				\
		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
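
/*
 * Example (illustrative only): for a descriptor with Op0=3, Op1=0,
 * CRn=1, CRm=0, Op2=0, reg_to_encoding() yields the same value as the
 * SYS_SCTLR_EL1 encoding built with sys_reg() in <asm/sysreg.h>.
 */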

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;

	if (vcpu->arch.sysregs_loaded_on_cpu &&
	    __vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

	return __vcpu_sys_reg(vcpu, reg);
}

void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (vcpu->arch.sysregs_loaded_on_cpu &&
	    __vcpu_write_sys_reg_to_cpu(val, reg))
		return;

	__vcpu_sys_reg(vcpu, reg) = val;
}

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 14
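
/*
 * Illustrative note (architectural background, not derived from this
 * file): CSSELR_EL1 selects a cache as (Level << 1) | InD, with InD=1
 * picking the instruction cache at that level:
 *
 *	csselr = (level << 1) | ind;	// level is 0-based
 *
 * CSSELR_MAX of 14 therefore covers the 7 cache levels describable by
 * CLIDR_EL1, D/unified and I caches at each level.
 */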

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}

static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	switch (r->aarch32_map) {
	case AA32_LO:
		*mask = GENMASK_ULL(31, 0);
		*shift = 0;
		break;
	case AA32_HI:
		*mask = GENMASK_ULL(63, 32);
		*shift = 32;
		break;
	default:
		*mask = GENMASK_ULL(63, 0);
		*shift = 0;
		break;
	}
}
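
/*
 * Worked example (illustrative): for an AA32_HI register the mask is
 * GENMASK_ULL(63, 32) and the shift is 32, so a 32-bit guest value v
 * lands in bits [63:32] of the 64-bit register as (v << 32); an
 * AA32_LO value replaces bits [31:0], and a full-width (default)
 * access replaces the whole register.
 */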

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	BUG_ON(!p->is_write);

	get_access_mask(r, &mask, &shift);

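	/*
	 * A partial (AArch32 half-register) mask must preserve the other
	 * half of the 64-bit register, so start from the current value;
	 * for a full-width mask, ~mask is zero and we build from scratch.
	 */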
	if (~mask) {
		val = vcpu_read_sys_reg(vcpu, r->reg);
		val &= ~mask;
	} else {
		val = 0;
	}

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_write_sys_reg(vcpu, val, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

static bool access_actlr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask, shift;

	if (p->is_write)
		return ignore_write(vcpu, p);

	get_access_mask(r, &mask, &shift);
	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->Op0 == 0) {		/* AArch32 */
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {			/* AArch64 */
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	u32 sr = reg_to_encoding(r);

	if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
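		/* OSLSR_EL1.OSLM[1] set: OS Lock implemented, OSLK clear */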
		p->regval = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
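
/*
 * Illustrative sketch of the policy above (pseudocode only; the real
 * logic lives in the debug setup and world-switch code, not here):
 *
 *	entry:
 *		if (dirty || (mdscr & (KDE | MDE))) {
 *			dirty = true;
 *			disable_traps();
 *			save_host_debug_regs();
 *			restore_guest_debug_regs();
 *		} else {
 *			enable_traps();
 *		}
 *	exit:
 *		if (dirty) {
 *			save_guest_debug_regs();
 *			restore_host_debug_regs();
 *			dirty = false;
 *		}
 */
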
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
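
/*
 * Worked example (illustrative): with an AA32_LO mapping, a 32-bit
 * guest write of 0x12345678 to a breakpoint value register updates
 * bits [31:0] only, leaving bits [63:32] of the 64-bit dbg_bvr entry
 * unchanged; the corresponding 32-bit read returns just bits [31:0].
 */
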
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift, val;

	get_access_mask(rd, &mask, &shift);

	val = *dbg_reg;
	val &= ~mask;
	val |= (p->regval & (mask >> shift)) << shift;
	*dbg_reg = val;

	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift;

	get_access_mask(rd, &mask, &shift);
	p->regval = (*dbg_reg & mask) >> shift;
}

static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;

	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);
	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
}

static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);
	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}
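
/*
 * Worked example (illustrative): vcpu_id 20 (0x14) yields Aff0 = 4 and
 * Aff1 = 1, i.e. MPIDR_EL1 = (1ULL << 31) | (1 << MPIDR_LEVEL_SHIFT(1))
 * | (4 << MPIDR_LEVEL_SHIFT(0)); bit 31 is RES1.
 */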

static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return 0;

	return REG_HIDDEN;
}

static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	/* No PMU available, PMCR_EL0 may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return;

	pmcr = read_sysreg(pmcr_el0);
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
	 * except for PMCR.E, which resets to zero; 0xdecafbad below is just an
	 * arbitrary bit pattern standing in for that UNKNOWN value.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	if (!system_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;
	__vcpu_sys_reg(vcpu, r->reg) = val;
}

static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!system_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

	return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}

static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding mistake */
	WARN_ON(idx == ~0UL);

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
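
/*
 * Worked decode example (illustrative): PMEVCNTR10_EL0 encodes as
 * CRn=14, CRm=9, Op2=2, so idx = ((9 & 3) << 3) | (2 & 7) = 10; the
 * same ((CRm & 3) << 3) | (Op2 & 7) recovery is used for
 * PMEVTYPERn_EL0 below.
 */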

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (check_pmu_access_disabled(vcpu, 0))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) const struct sys_reg_desc *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) u64 mask = kvm_pmu_valid_counter_mask(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (pmu_access_el0_disabled(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (p->is_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (r->CRm & 0x2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /* accessing PMOVSSET_EL0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) /* accessing PMOVSCLR_EL0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) const struct sys_reg_desc *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (!p->is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return read_from_write_only(vcpu, p, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (pmu_write_swinc_el0_disabled(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) mask = kvm_pmu_valid_counter_mask(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) kvm_pmu_software_increment(vcpu, p->regval & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) const struct sys_reg_desc *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (p->is_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (!vcpu_mode_priv(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) kvm_inject_undefined(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) __vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) p->regval & ARMV8_PMU_USERENR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) & ARMV8_PMU_USERENR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) #define DBG_BCR_BVR_WCR_WVR_EL1(n) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) { SYS_DESC(SYS_DBGBVRn_EL1(n)), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) { SYS_DESC(SYS_DBGBCRn_EL1(n)), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) { SYS_DESC(SYS_DBGWVRn_EL1(n)), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) { SYS_DESC(SYS_DBGWCRn_EL1(n)), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
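/*
 * For illustration, DBG_BCR_BVR_WCR_WVR_EL1(1) expands to four
 * descriptors, one per debug register of breakpoint/watchpoint
 * pair 1:
 *
 *   { SYS_DESC(SYS_DBGBVRn_EL1(1)), trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },
 *   { SYS_DESC(SYS_DBGBCRn_EL1(1)), trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },
 *   { SYS_DESC(SYS_DBGWVRn_EL1(1)), trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },
 *   { SYS_DESC(SYS_DBGWCRn_EL1(1)), trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr },
 */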
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) #define PMU_SYS_REG(r) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) SYS_DESC(r), .reset = reset_unknown, .visibility = pmu_visibility
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) /* Macro to expand the PMEVCNTRn_EL0 register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) #define PMU_PMEVCNTR_EL0(n) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) /* Macro to expand the PMEVTYPERn_EL0 register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) #define PMU_PMEVTYPER_EL0(n) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) { PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) const struct sys_reg_desc *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) kvm_inject_undefined(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) /* Macro to expand the AMU counter and type registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) #define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) #define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) #define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) #define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) const struct sys_reg_desc *rd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * If we land here on a PtrAuth access, that is because we didn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * fix up the access on exit by allowing the PtrAuth sysregs. The only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * way this happens is when the guest does not have PtrAuth support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) #define __PTRAUTH_KEY(k) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) { SYS_DESC(SYS_## k), undef_access, reset_unknown, k, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) .visibility = ptrauth_visibility}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) #define PTRAUTH_KEY(k) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) __PTRAUTH_KEY(k ## KEYLO_EL1), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) __PTRAUTH_KEY(k ## KEYHI_EL1)
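/*
 * An illustrative expansion (not used by the code): PTRAUTH_KEY(APIA)
 * produces two descriptors, covering APIAKEYLO_EL1 and APIAKEYHI_EL1.
 * Both trap to undef_access, and both are hidden from guest and
 * userspace unless the vcpu actually has ptrauth enabled.
 */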
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) static bool access_arch_timer(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct sys_reg_params *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) const struct sys_reg_desc *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) enum kvm_arch_timers tmr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) enum kvm_arch_timer_regs treg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) u64 reg = reg_to_encoding(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) case SYS_CNTP_TVAL_EL0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) case SYS_AARCH32_CNTP_TVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) tmr = TIMER_PTIMER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) treg = TIMER_REG_TVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) case SYS_CNTP_CTL_EL0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) case SYS_AARCH32_CNTP_CTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) tmr = TIMER_PTIMER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) treg = TIMER_REG_CTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) case SYS_CNTP_CVAL_EL0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) case SYS_AARCH32_CNTP_CVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) tmr = TIMER_PTIMER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) treg = TIMER_REG_CVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (p->is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) #define FEATURE(x) (GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT))
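/*
 * FEATURE() builds the mask of a single 4-bit ID register field from
 * its *_SHIFT definition. As a worked example (assuming
 * ID_AA64PFR0_SVE_SHIFT == 32, per <asm/sysreg.h>):
 *
 *   FEATURE(ID_AA64PFR0_SVE) == GENMASK_ULL(35, 32)
 *
 * i.e. bits [35:32] of ID_AA64PFR0_EL1.
 */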
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /* Read a sanitised cpufeature ID register by sys_reg_desc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) static u64 read_id_reg(const struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) struct sys_reg_desc const *r, bool raz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) u32 id = reg_to_encoding(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) switch (id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) case SYS_ID_AA64PFR0_EL1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (!vcpu_has_sve(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) val &= ~FEATURE(ID_AA64PFR0_SVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) val &= ~FEATURE(ID_AA64PFR0_AMU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) val &= ~FEATURE(ID_AA64PFR0_CSV2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) val &= ~FEATURE(ID_AA64PFR0_CSV3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) case SYS_ID_AA64PFR1_EL1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) val &= ~FEATURE(ID_AA64PFR1_MTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) case SYS_ID_AA64ISAR1_EL1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (!vcpu_has_ptrauth(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) val &= ~(FEATURE(ID_AA64ISAR1_APA) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) FEATURE(ID_AA64ISAR1_API) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) FEATURE(ID_AA64ISAR1_GPA) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) FEATURE(ID_AA64ISAR1_GPI));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) case SYS_ID_AA64DFR0_EL1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /* Limit debug to ARMv8.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) val &= ~FEATURE(ID_AA64DFR0_DEBUGVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) val |= FIELD_PREP(FEATURE(ID_AA64DFR0_DEBUGVER), 6); /* 0x6: ARMv8.0 debug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) /* Limit guests to PMUv3 for ARMv8.4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) val = cpuid_feature_cap_perfmon_field(val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) ID_AA64DFR0_PMUVER_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) case SYS_ID_DFR0_EL1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) /* Limit guests to PMUv3 for ARMv8.4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) val = cpuid_feature_cap_perfmon_field(val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) ID_DFR0_PERFMON_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_4 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
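/*
 * The clear-then-insert pattern above is the standard way to rewrite
 * one ID register field. A worked instance, assuming
 * ID_AA64PFR0_CSV2_SHIFT == 56 (so the field is bits [59:56]):
 *
 *   val &= ~FEATURE(ID_AA64PFR0_CSV2);   // clear bits [59:56]
 *   val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV2), csv2);
 *                                        // == (u64)csv2 << 56
 */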
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) const struct sys_reg_desc *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) u32 id = reg_to_encoding(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) switch (id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) case SYS_ID_AA64ZFR0_EL1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (!vcpu_has_sve(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return REG_RAZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) /* cpufeature ID register access trap handlers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) static bool __access_id_reg(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) struct sys_reg_params *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) const struct sys_reg_desc *r,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) bool raz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (p->is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return write_to_read_only(vcpu, p, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) p->regval = read_id_reg(vcpu, r, raz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) static bool access_id_reg(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) struct sys_reg_params *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) const struct sys_reg_desc *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) bool raz = sysreg_visible_as_raz(vcpu, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return __access_id_reg(vcpu, p, r, raz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) struct sys_reg_params *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) const struct sys_reg_desc *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) return __access_id_reg(vcpu, p, r, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) /* Visibility overrides for SVE-specific control registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) const struct sys_reg_desc *rd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (vcpu_has_sve(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return REG_HIDDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) const struct sys_reg_desc *rd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) const struct kvm_one_reg *reg, void __user *uaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) const u64 id = sys_reg_to_index(rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) u8 csv2, csv3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) err = reg_from_user(&val, uaddr, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * it doesn't promise more than what is actually provided (the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * guest could otherwise be covered in ectoplasmic residue).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (csv2 > 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) /* Same thing for CSV3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV3_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (csv3 > 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) /* Only the CSV[23] fields may differ; anything else is an error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) val ^= read_id_reg(vcpu, rd, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) val &= ~((0xFUL << ID_AA64PFR0_CSV2_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) (0xFUL << ID_AA64PFR0_CSV3_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) if (val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) vcpu->kvm->arch.pfr0_csv2 = csv2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) vcpu->kvm->arch.pfr0_csv3 = csv3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
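/*
 * The XOR check above deserves a worked example: after
 * val ^= read_id_reg(vcpu, rd, false), every bit where the userspace
 * value and the effective value agree becomes 0. Clearing the CSV2
 * and CSV3 nibbles then leaves val == 0 if and only if nothing else
 * changed; e.g. a write flipping only CSV2 from 0 to 1 yields
 * val == (1UL << ID_AA64PFR0_CSV2_SHIFT) before the mask and 0 after
 * it, so it is accepted.
 */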
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * cpufeature ID register user accessors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * For now, these registers are immutable for userspace, so no values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * are stored, and for set_id_reg() we don't allow the effective value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * to be changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) static int __get_id_reg(const struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) const struct sys_reg_desc *rd, void __user *uaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) bool raz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) const u64 id = sys_reg_to_index(rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) const u64 val = read_id_reg(vcpu, rd, raz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return reg_to_user(uaddr, &val, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) static int __set_id_reg(const struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) const struct sys_reg_desc *rd, void __user *uaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) bool raz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) const u64 id = sys_reg_to_index(rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) err = reg_from_user(&val, uaddr, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) /* This is what we mean by invariant: you can't change it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) if (val != read_id_reg(vcpu, rd, raz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) const struct kvm_one_reg *reg, void __user *uaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) bool raz = sysreg_visible_as_raz(vcpu, rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) return __get_id_reg(vcpu, rd, uaddr, raz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) const struct kvm_one_reg *reg, void __user *uaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) bool raz = sysreg_visible_as_raz(vcpu, rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return __set_id_reg(vcpu, rd, uaddr, raz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) const struct kvm_one_reg *reg, void __user *uaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) return __get_id_reg(vcpu, rd, uaddr, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) const struct kvm_one_reg *reg, void __user *uaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) return __set_id_reg(vcpu, rd, uaddr, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) const struct sys_reg_desc *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (p->is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return write_to_read_only(vcpu, p, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) const struct sys_reg_desc *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (p->is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return write_to_read_only(vcpu, p, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) p->regval = read_sysreg(clidr_el1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) const struct sys_reg_desc *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) int reg = r->reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (p->is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) vcpu_write_sys_reg(vcpu, p->regval, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) p->regval = vcpu_read_sys_reg(vcpu, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) const struct sys_reg_desc *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) u32 csselr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (p->is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) return write_to_read_only(vcpu, p, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) p->regval = get_ccsidr(csselr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * Guests should not be doing cache operations by set/way at all, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * for this reason, we trap them and attempt to infer the intent, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * that we can flush the entire guest's address space at the appropriate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * To prevent this trapping from causing performance problems, let's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * expose the geometry of all data and unified caches (which are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * [If guests should attempt to infer aliasing properties from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * geometry (which is not permitted by the architecture), they would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * only do so for virtually indexed caches.]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (!(csselr & 1)) // data or unified cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) p->regval &= ~GENMASK(27, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
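/*
 * A concrete reading of the mask above (assuming the pre-FEAT_CCIDX
 * CCSIDR_EL1 layout: LineSize in [2:0], Associativity in [12:3],
 * NumSets in [27:13]): clearing GENMASK(27, 3) forces Associativity
 * and NumSets to 0, which the architecture encodes as 1 way and
 * 1 set, while the cache line size in [2:0] is reported unmodified.
 */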
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) /* sys_reg_desc initialiser for known cpufeature ID registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) #define ID_SANITISED(name) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) SYS_DESC(SYS_##name), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) .access = access_id_reg, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) .get_user = get_id_reg, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) .set_user = set_id_reg, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) .visibility = id_visibility, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * (1 <= crm < 8, 0 <= Op2 < 8).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) #define ID_UNALLOCATED(crm, op2) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) .access = access_raz_id_reg, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) .get_user = get_raz_id_reg, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) .set_user = set_raz_id_reg, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * sys_reg_desc initialiser for known ID registers that we hide from guests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * For now, these are exposed just like unallocated ID regs: they appear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * RAZ for the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) #define ID_HIDDEN(name) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) SYS_DESC(SYS_##name), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) .access = access_raz_id_reg, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) .get_user = get_raz_id_reg, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) .set_user = set_raz_id_reg, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * Architected system registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * Debug handling: We do trap most, if not all, debug-related system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * registers. The implementation is good enough to ensure that a guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * can use these with minimal performance degradation. The drawback is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * that we implement neither the external debug architecture nor the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * OSlock protocol. This should be revisited if we ever encounter a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * more demanding guest...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) static const struct sys_reg_desc sys_reg_descs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) { SYS_DESC(SYS_DC_ISW), access_dcsw },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) { SYS_DESC(SYS_DC_CSW), access_dcsw },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) { SYS_DESC(SYS_DC_CISW), access_dcsw },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) DBG_BCR_BVR_WCR_WVR_EL1(0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) DBG_BCR_BVR_WCR_WVR_EL1(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) DBG_BCR_BVR_WCR_WVR_EL1(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) DBG_BCR_BVR_WCR_WVR_EL1(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) DBG_BCR_BVR_WCR_WVR_EL1(4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) DBG_BCR_BVR_WCR_WVR_EL1(5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) DBG_BCR_BVR_WCR_WVR_EL1(6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) DBG_BCR_BVR_WCR_WVR_EL1(7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) DBG_BCR_BVR_WCR_WVR_EL1(8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) DBG_BCR_BVR_WCR_WVR_EL1(9),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) DBG_BCR_BVR_WCR_WVR_EL1(10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) DBG_BCR_BVR_WCR_WVR_EL1(11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) DBG_BCR_BVR_WCR_WVR_EL1(12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) DBG_BCR_BVR_WCR_WVR_EL1(13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) DBG_BCR_BVR_WCR_WVR_EL1(14),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) DBG_BCR_BVR_WCR_WVR_EL1(15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) { SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) // DBGDTR[TR]X_EL0 share the same encoding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * ID regs: all ID_SANITISED() entries here must have corresponding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * entries in arm64_ftr_regs[].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) /* AArch64 mappings of the AArch32 ID registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) /* CRm=1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) ID_SANITISED(ID_PFR0_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) ID_SANITISED(ID_PFR1_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) ID_SANITISED(ID_DFR0_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) ID_HIDDEN(ID_AFR0_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) ID_SANITISED(ID_MMFR0_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) ID_SANITISED(ID_MMFR1_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) ID_SANITISED(ID_MMFR2_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) ID_SANITISED(ID_MMFR3_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) /* CRm=2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) ID_SANITISED(ID_ISAR0_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) ID_SANITISED(ID_ISAR1_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) ID_SANITISED(ID_ISAR2_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) ID_SANITISED(ID_ISAR3_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) ID_SANITISED(ID_ISAR4_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) ID_SANITISED(ID_ISAR5_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) ID_SANITISED(ID_MMFR4_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) ID_SANITISED(ID_ISAR6_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) /* CRm=3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) ID_SANITISED(MVFR0_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) ID_SANITISED(MVFR1_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) ID_SANITISED(MVFR2_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) ID_UNALLOCATED(3,3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) ID_SANITISED(ID_PFR2_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) ID_HIDDEN(ID_DFR1_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) ID_SANITISED(ID_MMFR5_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) ID_UNALLOCATED(3,7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) /* AArch64 ID registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) /* CRm=4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) { SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) ID_SANITISED(ID_AA64PFR1_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) ID_UNALLOCATED(4,2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) ID_UNALLOCATED(4,3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) ID_SANITISED(ID_AA64ZFR0_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) ID_UNALLOCATED(4,5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) ID_UNALLOCATED(4,6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) ID_UNALLOCATED(4,7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) /* CRm=5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) ID_SANITISED(ID_AA64DFR0_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) ID_SANITISED(ID_AA64DFR1_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) ID_UNALLOCATED(5,2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) ID_UNALLOCATED(5,3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) ID_HIDDEN(ID_AA64AFR0_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) ID_HIDDEN(ID_AA64AFR1_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) ID_UNALLOCATED(5,6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) ID_UNALLOCATED(5,7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) /* CRm=6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) ID_SANITISED(ID_AA64ISAR0_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) ID_SANITISED(ID_AA64ISAR1_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) ID_SANITISED(ID_AA64ISAR2_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) ID_UNALLOCATED(6,3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) ID_UNALLOCATED(6,4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) ID_UNALLOCATED(6,5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) ID_UNALLOCATED(6,6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) ID_UNALLOCATED(6,7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) /* CRm=7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) ID_SANITISED(ID_AA64MMFR0_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) ID_SANITISED(ID_AA64MMFR1_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) ID_SANITISED(ID_AA64MMFR2_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) ID_UNALLOCATED(7,3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) ID_UNALLOCATED(7,4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) ID_UNALLOCATED(7,5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) ID_UNALLOCATED(7,6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) ID_UNALLOCATED(7,7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) { SYS_DESC(SYS_RGSR_EL1), undef_access },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) { SYS_DESC(SYS_GCR_EL1), undef_access },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) { SYS_DESC(SYS_TRFCR_EL1), undef_access },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) PTRAUTH_KEY(APIA),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) PTRAUTH_KEY(APIB),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) PTRAUTH_KEY(APDA),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) PTRAUTH_KEY(APDB),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) PTRAUTH_KEY(APGA),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) { SYS_DESC(SYS_TFSR_EL1), undef_access },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) { SYS_DESC(SYS_TFSRE0_EL1), undef_access },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) { PMU_SYS_REG(SYS_PMINTENSET_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) .access = access_pminten, .reg = PMINTENSET_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) { PMU_SYS_REG(SYS_PMINTENCLR_EL1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) .access = access_pminten, .reg = PMINTENSET_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) { SYS_DESC(SYS_LORN_EL1), trap_loregion },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) { SYS_DESC(SYS_LORC_EL1), trap_loregion },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) { SYS_DESC(SYS_LORID_EL1), trap_loregion },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) { SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) { SYS_DESC(SYS_CLIDR_EL1), access_clidr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) { SYS_DESC(SYS_CTR_EL0), access_ctr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) { PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) .reset = reset_pmcr, .reg = PMCR_EL0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) { PMU_SYS_REG(SYS_PMCNTENSET_EL0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) { PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) { PMU_SYS_REG(SYS_PMOVSCLR_EL0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) .access = access_pmovs, .reg = PMOVSSET_EL0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) { PMU_SYS_REG(SYS_PMSWINC_EL0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) .access = access_pmswinc, .reg = PMSWINC_EL0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) { PMU_SYS_REG(SYS_PMSELR_EL0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) .access = access_pmselr, .reg = PMSELR_EL0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) { PMU_SYS_REG(SYS_PMCEID0_EL0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) .access = access_pmceid, .reset = NULL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) { PMU_SYS_REG(SYS_PMCEID1_EL0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) .access = access_pmceid, .reset = NULL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) { PMU_SYS_REG(SYS_PMCCNTR_EL0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) .access = access_pmu_evcntr, .reg = PMCCNTR_EL0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) .access = access_pmu_evtyper, .reset = NULL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) .access = access_pmu_evcntr, .reset = NULL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) * in 32bit mode. Here we choose to reset it as zero for consistency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) { PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) { PMU_SYS_REG(SYS_PMOVSSET_EL0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) .access = access_pmovs, .reg = PMOVSSET_EL0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) { SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) { SYS_DESC(SYS_AMCR_EL0), undef_access },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) { SYS_DESC(SYS_AMCFGR_EL0), undef_access },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) { SYS_DESC(SYS_AMCGCR_EL0), undef_access },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) { SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) { SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) { SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) { SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) { SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) AMU_AMEVCNTR0_EL0(0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) AMU_AMEVCNTR0_EL0(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) AMU_AMEVCNTR0_EL0(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) AMU_AMEVCNTR0_EL0(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) AMU_AMEVCNTR0_EL0(4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) AMU_AMEVCNTR0_EL0(5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) AMU_AMEVCNTR0_EL0(6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) AMU_AMEVCNTR0_EL0(7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) AMU_AMEVCNTR0_EL0(8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) AMU_AMEVCNTR0_EL0(9),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) AMU_AMEVCNTR0_EL0(10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) AMU_AMEVCNTR0_EL0(11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) AMU_AMEVCNTR0_EL0(12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) AMU_AMEVCNTR0_EL0(13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) AMU_AMEVCNTR0_EL0(14),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) AMU_AMEVCNTR0_EL0(15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) AMU_AMEVTYPER0_EL0(0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) AMU_AMEVTYPER0_EL0(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) AMU_AMEVTYPER0_EL0(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) AMU_AMEVTYPER0_EL0(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) AMU_AMEVTYPER0_EL0(4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) AMU_AMEVTYPER0_EL0(5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) AMU_AMEVTYPER0_EL0(6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) AMU_AMEVTYPER0_EL0(7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) AMU_AMEVTYPER0_EL0(8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) AMU_AMEVTYPER0_EL0(9),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) AMU_AMEVTYPER0_EL0(10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) AMU_AMEVTYPER0_EL0(11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) AMU_AMEVTYPER0_EL0(12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) AMU_AMEVTYPER0_EL0(13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) AMU_AMEVTYPER0_EL0(14),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) AMU_AMEVTYPER0_EL0(15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) AMU_AMEVCNTR1_EL0(0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) AMU_AMEVCNTR1_EL0(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) AMU_AMEVCNTR1_EL0(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) AMU_AMEVCNTR1_EL0(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) AMU_AMEVCNTR1_EL0(4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) AMU_AMEVCNTR1_EL0(5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) AMU_AMEVCNTR1_EL0(6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) AMU_AMEVCNTR1_EL0(7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) AMU_AMEVCNTR1_EL0(8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) AMU_AMEVCNTR1_EL0(9),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) AMU_AMEVCNTR1_EL0(10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) AMU_AMEVCNTR1_EL0(11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) AMU_AMEVCNTR1_EL0(12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) AMU_AMEVCNTR1_EL0(13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) AMU_AMEVCNTR1_EL0(14),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) AMU_AMEVCNTR1_EL0(15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) AMU_AMEVTYPER1_EL0(0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) AMU_AMEVTYPER1_EL0(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) AMU_AMEVTYPER1_EL0(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) AMU_AMEVTYPER1_EL0(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) AMU_AMEVTYPER1_EL0(4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) AMU_AMEVTYPER1_EL0(5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) AMU_AMEVTYPER1_EL0(6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) AMU_AMEVTYPER1_EL0(7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) AMU_AMEVTYPER1_EL0(8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) AMU_AMEVTYPER1_EL0(9),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) AMU_AMEVTYPER1_EL0(10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) AMU_AMEVTYPER1_EL0(11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) AMU_AMEVTYPER1_EL0(12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) AMU_AMEVTYPER1_EL0(13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) AMU_AMEVTYPER1_EL0(14),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) AMU_AMEVTYPER1_EL0(15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) /* PMEVCNTRn_EL0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) PMU_PMEVCNTR_EL0(0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) PMU_PMEVCNTR_EL0(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) PMU_PMEVCNTR_EL0(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) PMU_PMEVCNTR_EL0(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) PMU_PMEVCNTR_EL0(4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) PMU_PMEVCNTR_EL0(5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) PMU_PMEVCNTR_EL0(6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) PMU_PMEVCNTR_EL0(7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) PMU_PMEVCNTR_EL0(8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) PMU_PMEVCNTR_EL0(9),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) PMU_PMEVCNTR_EL0(10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) PMU_PMEVCNTR_EL0(11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) PMU_PMEVCNTR_EL0(12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) PMU_PMEVCNTR_EL0(13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) PMU_PMEVCNTR_EL0(14),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) PMU_PMEVCNTR_EL0(15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) PMU_PMEVCNTR_EL0(16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) PMU_PMEVCNTR_EL0(17),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) PMU_PMEVCNTR_EL0(18),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) PMU_PMEVCNTR_EL0(19),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) PMU_PMEVCNTR_EL0(20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) PMU_PMEVCNTR_EL0(21),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) PMU_PMEVCNTR_EL0(22),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) PMU_PMEVCNTR_EL0(23),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) PMU_PMEVCNTR_EL0(24),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) PMU_PMEVCNTR_EL0(25),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) PMU_PMEVCNTR_EL0(26),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) PMU_PMEVCNTR_EL0(27),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) PMU_PMEVCNTR_EL0(28),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) PMU_PMEVCNTR_EL0(29),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) PMU_PMEVCNTR_EL0(30),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) /* PMEVTYPERn_EL0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) PMU_PMEVTYPER_EL0(0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) PMU_PMEVTYPER_EL0(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) PMU_PMEVTYPER_EL0(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) PMU_PMEVTYPER_EL0(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) PMU_PMEVTYPER_EL0(4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) PMU_PMEVTYPER_EL0(5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) PMU_PMEVTYPER_EL0(6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) PMU_PMEVTYPER_EL0(7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) PMU_PMEVTYPER_EL0(8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) PMU_PMEVTYPER_EL0(9),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) PMU_PMEVTYPER_EL0(10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) PMU_PMEVTYPER_EL0(11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) PMU_PMEVTYPER_EL0(12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) PMU_PMEVTYPER_EL0(13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) PMU_PMEVTYPER_EL0(14),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) PMU_PMEVTYPER_EL0(15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) PMU_PMEVTYPER_EL0(16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) PMU_PMEVTYPER_EL0(17),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) PMU_PMEVTYPER_EL0(18),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) PMU_PMEVTYPER_EL0(19),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) PMU_PMEVTYPER_EL0(20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) PMU_PMEVTYPER_EL0(21),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) PMU_PMEVTYPER_EL0(22),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) PMU_PMEVTYPER_EL0(23),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) PMU_PMEVTYPER_EL0(24),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) PMU_PMEVTYPER_EL0(25),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) PMU_PMEVTYPER_EL0(26),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) PMU_PMEVTYPER_EL0(27),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) PMU_PMEVTYPER_EL0(28),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) PMU_PMEVTYPER_EL0(29),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) PMU_PMEVTYPER_EL0(30),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) * in 32bit mode. Here we choose to reset it as zero for consistency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) { PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) struct sys_reg_params *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) const struct sys_reg_desc *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (p->is_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) return ignore_write(vcpu, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
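		/*
		 * Synthesize a DBGDIDR value from the 64bit ID registers.
		 * Field layout (see the ARM ARM description of DBGDIDR):
		 * WRPs [31:28], BRPs [27:24], CTX_CMPs [23:20], Version
		 * [19:16] (6 == ARMv8 debug architecture), bit 15 RES1,
		 * and nSUHD_imp [14] / SE_imp [12] reflecting whether EL3
		 * is implemented.
		 */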
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) * AArch32 debug register mappings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) * None of the other registers share their location, so treat them as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) * if they were 64bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) #define DBG_BCR_BVR_WCR_WVR(n) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) /* DBGBVRn */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) /* DBGBCRn */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) /* DBGWVRn */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) /* DBGWCRn */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) #define DBGBXVR(n) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) * Trapped cp14 registers. We generally ignore most of the external
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) * debug, on the principle that they don't really make sense to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) * guest. Revisit this one day, would this principle change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) static const struct sys_reg_desc cp14_regs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) /* DBGDIDR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) /* DBGDTRRXext */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) DBG_BCR_BVR_WCR_WVR(0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) /* DBGDSCRint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) DBG_BCR_BVR_WCR_WVR(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) /* DBGDCCINT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) /* DBGDSCRext */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) DBG_BCR_BVR_WCR_WVR(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) /* DBGDTR[RT]Xint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) /* DBGDTR[RT]Xext */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) DBG_BCR_BVR_WCR_WVR(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) DBG_BCR_BVR_WCR_WVR(4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) DBG_BCR_BVR_WCR_WVR(5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) /* DBGWFAR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) /* DBGOSECCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) DBG_BCR_BVR_WCR_WVR(6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) /* DBGVCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) DBG_BCR_BVR_WCR_WVR(7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) DBG_BCR_BVR_WCR_WVR(8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) DBG_BCR_BVR_WCR_WVR(9),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) DBG_BCR_BVR_WCR_WVR(10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) DBG_BCR_BVR_WCR_WVR(11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) DBG_BCR_BVR_WCR_WVR(12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) DBG_BCR_BVR_WCR_WVR(13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) DBG_BCR_BVR_WCR_WVR(14),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) DBG_BCR_BVR_WCR_WVR(15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) /* DBGDRAR (32bit) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) DBGBXVR(0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) /* DBGOSLAR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) DBGBXVR(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) /* DBGOSLSR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) DBGBXVR(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) DBGBXVR(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) /* DBGOSDLR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) DBGBXVR(4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) /* DBGPRCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) DBGBXVR(5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) DBGBXVR(6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) DBGBXVR(7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) DBGBXVR(8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) DBGBXVR(9),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) DBGBXVR(10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) DBGBXVR(11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) DBGBXVR(12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) DBGBXVR(13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) DBGBXVR(14),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) DBGBXVR(15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) /* DBGDSAR (32bit) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) /* DBGDEVID2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) /* DBGDEVID1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) /* DBGDEVID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) /* DBGCLAIMSET */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) /* DBGCLAIMCLR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) /* DBGAUTHSTATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) /* Trapped cp14 64bit registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) static const struct sys_reg_desc cp14_64_regs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) /* DBGDRAR (64bit) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) { Op1( 0), CRm( 1), .access = trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) /* DBGDSAR (64bit) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) { Op1( 0), CRm( 2), .access = trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) /* Macro to expand the PMEVCNTRn register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) #define PMU_PMEVCNTR(n) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) /* PMEVCNTRn */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) { Op1(0), CRn(0b1110), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) access_pmu_evcntr }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) /* Macro to expand the PMEVTYPERn register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) #define PMU_PMEVTYPER(n) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) /* PMEVTYPERn */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) { Op1(0), CRn(0b1110), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) access_pmu_evtyper }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) * depending on the way they are accessed (as a 32bit or a 64bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) * register).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) static const struct sys_reg_desc cp15_regs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) /* ACTLR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) { AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) /* ACTLR2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) { AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) /* TTBCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) { AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) /* TTBCR2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) { AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) /* DFSR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) /* ADFSR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) /* AIFSR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) /* DFAR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) { AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) /* IFAR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) { AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) * DC{C,I,CI}SW operations:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) /* PMU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) { AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) { AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) { Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) { AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 4), access_pmceid },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) { AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 5), access_pmceid },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) /* PMMIR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) { Op1( 0), CRn( 9), CRm(14), Op2( 6), trap_raz_wi },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) /* PRRR/MAIR0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) { AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) /* NMRR/MAIR1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) { AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) /* AMAIR0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) { AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) /* AMAIR1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) { AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) /* ICC_SRE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) /* Arch Tmers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) /* PMEVCNTRn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) PMU_PMEVCNTR(0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) PMU_PMEVCNTR(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) PMU_PMEVCNTR(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) PMU_PMEVCNTR(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) PMU_PMEVCNTR(4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) PMU_PMEVCNTR(5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) PMU_PMEVCNTR(6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) PMU_PMEVCNTR(7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) PMU_PMEVCNTR(8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) PMU_PMEVCNTR(9),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) PMU_PMEVCNTR(10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) PMU_PMEVCNTR(11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) PMU_PMEVCNTR(12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) PMU_PMEVCNTR(13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) PMU_PMEVCNTR(14),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) PMU_PMEVCNTR(15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) PMU_PMEVCNTR(16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) PMU_PMEVCNTR(17),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) PMU_PMEVCNTR(18),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) PMU_PMEVCNTR(19),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) PMU_PMEVCNTR(20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) PMU_PMEVCNTR(21),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) PMU_PMEVCNTR(22),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) PMU_PMEVCNTR(23),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) PMU_PMEVCNTR(24),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) PMU_PMEVCNTR(25),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) PMU_PMEVCNTR(26),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) PMU_PMEVCNTR(27),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) PMU_PMEVCNTR(28),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) PMU_PMEVCNTR(29),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) PMU_PMEVCNTR(30),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) /* PMEVTYPERn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) PMU_PMEVTYPER(0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) PMU_PMEVTYPER(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) PMU_PMEVTYPER(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) PMU_PMEVTYPER(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) PMU_PMEVTYPER(4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) PMU_PMEVTYPER(5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) PMU_PMEVTYPER(6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) PMU_PMEVTYPER(7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) PMU_PMEVTYPER(8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) PMU_PMEVTYPER(9),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) PMU_PMEVTYPER(10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) PMU_PMEVTYPER(11),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) PMU_PMEVTYPER(12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) PMU_PMEVTYPER(13),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) PMU_PMEVTYPER(14),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) PMU_PMEVTYPER(15),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) PMU_PMEVTYPER(16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) PMU_PMEVTYPER(17),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) PMU_PMEVTYPER(18),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) PMU_PMEVTYPER(19),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) PMU_PMEVTYPER(20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) PMU_PMEVTYPER(21),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) PMU_PMEVTYPER(22),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) PMU_PMEVTYPER(23),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) PMU_PMEVTYPER(24),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) PMU_PMEVTYPER(25),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) PMU_PMEVTYPER(26),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) PMU_PMEVTYPER(27),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) PMU_PMEVTYPER(28),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) PMU_PMEVTYPER(29),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) PMU_PMEVTYPER(30),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) /* PMCCFILTR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) { Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) static const struct sys_reg_desc cp15_64_regs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) { Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) { SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
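/*
 * Sanity-check a trap table at init time: every 64bit entry backed by
 * a vcpu register must have a reset handler, and entries must be
 * sorted by encoding so that find_reg() can bsearch() them.
 */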
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) bool is_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (!is_32 && table[i].reg && !table[i].reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) kvm_err("sys_reg table %p entry %d has lacks reset\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) table, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
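/*
 * bsearch() comparator: both the key and the table entries are full
 * Op0/Op1/CRn/CRm/Op2 encodings, so a plain subtraction gives the
 * required ordering.
 */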
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) static int match_sys_reg(const void *key, const void *elt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) const unsigned long pval = (unsigned long)key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) const struct sys_reg_desc *r = elt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) return pval - reg_to_encoding(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) const struct sys_reg_desc table[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) unsigned int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) unsigned long pval = reg_to_encoding(params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
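/*
 * LDC/STC accesses to the cp14 debug registers are not emulated at
 * all: simply inject an UNDEF into the guest.
 */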
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) kvm_inject_undefined(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) static void perform_access(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) struct sys_reg_params *params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) const struct sys_reg_desc *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) /* Check for regs disabled by runtime config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) if (sysreg_hidden(vcpu, r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) kvm_inject_undefined(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) * Not having an accessor means that we have configured a trap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) * that we don't know how to handle. This certainly qualifies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) * as a gross bug that should be fixed right away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) BUG_ON(!r->access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) /* Skip instruction if instructed so */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) if (likely(r->access(vcpu, params, r)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) kvm_incr_pc(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) * emulate_cp -- tries to match a sys_reg access in a handling table, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) * call the corresponding trap handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) * @params: pointer to the descriptor of the access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) * @table: array of trap descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) * @num: size of the trap descriptor array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) * Return 0 if the access has been handled, and -1 if not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) static int emulate_cp(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) struct sys_reg_params *params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) const struct sys_reg_desc *table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) size_t num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) const struct sys_reg_desc *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) if (!table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) return -1; /* Not handled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) r = find_reg(params, table, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) perform_access(vcpu, params, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) /* Not handled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) static void unhandled_cp_access(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) struct sys_reg_params *params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) int cp = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) switch (esr_ec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) case ESR_ELx_EC_CP15_32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) case ESR_ELx_EC_CP15_64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) cp = 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) case ESR_ELx_EC_CP14_MR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) case ESR_ELx_EC_CP14_64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) cp = 14;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) print_sys_reg_msg(params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) "Unsupported guest CP%d access at: %08lx [%08lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) kvm_inject_undefined(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) * @vcpu: The VCPU pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) * @run: The kvm_run struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) const struct sys_reg_desc *global,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) size_t nr_global)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) struct sys_reg_params params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) u32 esr = kvm_vcpu_get_esr(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) int Rt = kvm_vcpu_sys_get_rt(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) int Rt2 = (esr >> 10) & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
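	/*
	 * Decode the MCRR/MRRC ISS (see the ESR_ELx encoding in the
	 * ARM ARM): Direction [0] (0 == write), CRm [4:1], Rt [9:5],
	 * Rt2 [14:10], Opc1 [19:16].
	 */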
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) params.CRm = (esr >> 1) & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) params.is_write = ((esr & 1) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) params.Op0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) params.Op1 = (esr >> 16) & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) params.Op2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) params.CRn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) * Make a 64-bit value out of Rt and Rt2. As we use the same trap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) * backends between AArch32 and AArch64, we get away with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) if (params.is_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) * If the table contains a handler, handle the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) * potential register operation in the case of a read and return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) * with success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) if (!emulate_cp(vcpu, ¶ms, global, nr_global)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) /* Split up the value between registers for the read side */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) if (!params.is_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) unhandled_cp_access(vcpu, ¶ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) * @vcpu: The VCPU pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) * @run: The kvm_run struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) const struct sys_reg_desc *global,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) size_t nr_global)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) struct sys_reg_params params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) u32 esr = kvm_vcpu_get_esr(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) int Rt = kvm_vcpu_sys_get_rt(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235)
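	/*
	 * Decode the MCR/MRC ISS (see the ESR_ELx encoding in the
	 * ARM ARM): Direction [0] (0 == write), CRm [4:1], Rt [9:5],
	 * CRn [13:10], Opc1 [16:14], Opc2 [19:17].
	 */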
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) params.CRm = (esr >> 1) & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) params.regval = vcpu_get_reg(vcpu, Rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) params.is_write = ((esr & 1) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) params.CRn = (esr >> 10) & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) params.Op0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) params.Op1 = (esr >> 14) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) params.Op2 = (esr >> 17) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) if (!emulate_cp(vcpu, ¶ms, global, nr_global)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) if (!params.is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) vcpu_set_reg(vcpu, Rt, params.regval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) unhandled_cp_access(vcpu, ¶ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) return kvm_handle_cp_32(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) return kvm_handle_cp_32(vcpu, cp14_regs, ARRAY_SIZE(cp14_regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) static bool is_imp_def_sys_reg(struct sys_reg_params *params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) // See ARM DDI 0487E.a, section D12.3.2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) static int emulate_sys_reg(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) struct sys_reg_params *params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) const struct sys_reg_desc *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) if (likely(r)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) perform_access(vcpu, params, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) } else if (is_imp_def_sys_reg(params)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) kvm_inject_undefined(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) print_sys_reg_msg(params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) "Unsupported guest sys_reg access at: %lx [%08lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) kvm_inject_undefined(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) * kvm_reset_sys_regs - sets system registers to reset value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) * @vcpu: The VCPU pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) * This function finds the right table above and sets the registers on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) * virtual CPU struct to their architecturally defined reset values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) if (sys_reg_descs[i].reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) sys_reg_descs[i].reset(vcpu, &sys_reg_descs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) * @vcpu: The VCPU pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) struct sys_reg_params params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) unsigned long esr = kvm_vcpu_get_esr(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) int Rt = kvm_vcpu_sys_get_rt(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) trace_kvm_handle_sys_reg(esr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
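	/*
	 * Decode the MSR/MRS ISS (see the ESR_ELx encoding in the
	 * ARM ARM): Direction [0] (0 == write), CRm [4:1], Rt [9:5],
	 * CRn [13:10], Op1 [16:14], Op2 [19:17], Op0 [21:20].
	 */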
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) params.Op0 = (esr >> 20) & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) params.Op1 = (esr >> 14) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) params.CRn = (esr >> 10) & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) params.CRm = (esr >> 1) & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) params.Op2 = (esr >> 17) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) params.regval = vcpu_get_reg(vcpu, Rt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) params.is_write = !(esr & 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) ret = emulate_sys_reg(vcpu, ¶ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) if (!params.is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) vcpu_set_reg(vcpu, Rt, params.regval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) /******************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) * Userspace API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) *****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
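/*
 * Userspace register indices pack the Op0/Op1/CRn/CRm/Op2 encoding of
 * a system register into the low bits of a 64bit id, together with the
 * KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG flags (see
 * the KVM_REG_ARM64_SYSREG_* masks and shifts in the uapi kvm.h header).
 */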
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) static bool index_to_params(u64 id, struct sys_reg_params *params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) switch (id & KVM_REG_SIZE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) case KVM_REG_SIZE_U64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) /* Any unused index bits means it's not valid. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) | KVM_REG_ARM_COPROC_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) | KVM_REG_ARM64_SYSREG_OP0_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) | KVM_REG_ARM64_SYSREG_OP1_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) | KVM_REG_ARM64_SYSREG_CRN_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) | KVM_REG_ARM64_SYSREG_CRM_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) | KVM_REG_ARM64_SYSREG_OP2_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) }
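
/*
 * Note that sys_reg_to_index() below is the exact inverse of this
 * decoding: it ORs KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG
 * with the Op0/Op1/CRn/CRm/Op2 fields shifted into place, so
 * index_to_params(sys_reg_to_index(r), &p) recovers r's encoding.
 */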
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) const struct sys_reg_desc *find_reg_by_id(u64 id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) struct sys_reg_params *params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) const struct sys_reg_desc table[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) unsigned int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) if (!index_to_params(id, params))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) return find_reg(params, table, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) /* Decode an index value, and find the sys_reg_desc entry. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) u64 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) const struct sys_reg_desc *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) struct sys_reg_params params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) /* We only do sys_reg for now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) if (!index_to_params(id, &params))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) /* Not saved in the sys_reg array and not otherwise accessible? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) if (r && !(r->reg || r->get_user))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) r = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) * These are the invariant system registers: we let the guest see the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) * host versions of these, so they're part of the guest state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) * A future CPU may provide a mechanism to present different values to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) * the guest, or a future kvm may trap them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) #define FUNCTION_INVARIANT(reg) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) static void get_##reg(struct kvm_vcpu *v, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) const struct sys_reg_desc *r) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) }
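
/*
 * The const cast in FUNCTION_INVARIANT() is deliberate: these helpers
 * seed ->val in the writable invariant_sys_regs[] table below, even
 * though the reset callback type takes a const descriptor.
 */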
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) FUNCTION_INVARIANT(midr_el1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) FUNCTION_INVARIANT(revidr_el1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) FUNCTION_INVARIANT(clidr_el1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) FUNCTION_INVARIANT(aidr_el1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) ((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) }
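
/*
 * CTR_EL0 may legitimately differ between CPUs on a heterogeneous
 * system, so expose the system-wide sanitised value rather than a raw
 * read_sysreg() on whichever CPU happens to run this.
 */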
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) /* ->val is filled in by kvm_sys_reg_table_init() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) static struct sys_reg_desc invariant_sys_regs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) { SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) { SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) { SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) { SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) static int get_invariant_sys_reg(u64 id, void __user *uaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) struct sys_reg_params params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) const struct sys_reg_desc *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) r = find_reg_by_id(id, &params, invariant_sys_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) ARRAY_SIZE(invariant_sys_regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) if (!r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) return reg_to_user(uaddr, &r->val, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) static int set_invariant_sys_reg(u64 id, void __user *uaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) struct sys_reg_params params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) const struct sys_reg_desc *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) r = find_reg_by_id(id, &params, invariant_sys_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) ARRAY_SIZE(invariant_sys_regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) if (!r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) err = reg_from_user(&val, uaddr, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) /* This is what we mean by invariant: you can't change it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) if (r->val != val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) static bool is_valid_cache(u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) u32 level, ctype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) if (val >= CSSELR_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) level = (val >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) ctype = (cache_levels >> (level * 3)) & 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) switch (ctype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) case 0: /* No cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) case 1: /* Instruction cache only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) return (val & 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) case 2: /* Data cache only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) case 4: /* Unified cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) return !(val & 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) case 3: /* Separate instruction and data caches */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) default: /* Reserved: we can't know instruction or data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) }
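
/*
 * Worked example: val == 3 selects level 2 (bits [3:1] == 1) with the
 * InD bit (bit [0]) set, i.e. the L2 instruction cache. That is only
 * valid if Ctype2 in cache_levels reports an instruction cache (1) or
 * separate instruction and data caches (3).
 */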
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) static int demux_c15_get(u64 id, void __user *uaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) u32 __user *uval = uaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) /* Fail if we have unknown bits set. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) case KVM_REG_ARM_DEMUX_ID_CCSIDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) if (KVM_REG_SIZE(id) != 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) if (!is_valid_cache(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) return put_user(get_ccsidr(val), uval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) static int demux_c15_set(u64 id, void __user *uaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) u32 val, newval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) u32 __user *uval = uaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) /* Fail if we have unknown bits set. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) case KVM_REG_ARM_DEMUX_ID_CCSIDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) if (KVM_REG_SIZE(id) != 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) if (!is_valid_cache(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) if (get_user(newval, uval))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) /* This is also invariant: you can't change it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) if (newval != get_ccsidr(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) }
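
/*
 * Both demux accessors above address a CCSIDR via a 32-bit index of the
 * shape written out by write_demux_regids() below: KVM_REG_ARM64 |
 * KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | csselr,
 * where csselr must pass is_valid_cache().
 */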
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) const struct sys_reg_desc *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) void __user *uaddr = (void __user *)(unsigned long)reg->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) return demux_c15_get(reg->id, uaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) r = index_to_sys_reg_desc(vcpu, reg->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) if (!r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) return get_invariant_sys_reg(reg->id, uaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) /* Check for regs disabled by runtime config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) if (sysreg_hidden(vcpu, r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) if (r->get_user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) return (r->get_user)(vcpu, r, reg, uaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) const struct sys_reg_desc *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) void __user *uaddr = (void __user *)(unsigned long)reg->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) return demux_c15_set(reg->id, uaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) r = index_to_sys_reg_desc(vcpu, reg->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) if (!r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) return set_invariant_sys_reg(reg->id, uaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) /* Check for regs disabled by runtime config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) if (sysreg_hidden(vcpu, r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) if (r->set_user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) return (r->set_user)(vcpu, r, reg, uaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) static unsigned int num_demux_regs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) unsigned int i, count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) for (i = 0; i < CSSELR_MAX; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) if (is_valid_cache(i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) static int write_demux_regids(u64 __user *uindices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) for (i = 0; i < CSSELR_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) if (!is_valid_cache(i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) if (put_user(val | i, uindices))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) uindices++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) KVM_REG_ARM64_SYSREG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664)
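/*
 * copy_reg_to_user() and walk_one_sys_reg() implement a two-pass scheme:
 * called with a NULL cursor they only count the user-visible registers
 * (see kvm_arm_num_sys_reg_descs()), while a non-NULL cursor makes them
 * emit the corresponding indices as well.
 */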
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) if (!*uind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) if (put_user(sys_reg_to_index(reg), *uind))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) (*uind)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) const struct sys_reg_desc *rd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) u64 __user **uind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) unsigned int *total)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) * Ignore registers we trap but don't save,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) * and for which no custom user accessor is provided.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) if (!(rd->reg || rd->get_user))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) if (sysreg_hidden(vcpu, rd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) if (!copy_reg_to_user(rd, uind))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) (*total)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) /* The tables are assumed to be ordered; see kvm_sys_reg_table_init(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) const struct sys_reg_desc *i2, *end2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) unsigned int total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) i2 = sys_reg_descs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) while (i2 != end2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) return total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) return ARRAY_SIZE(invariant_sys_regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) + num_demux_regs()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) + walk_sys_regs(vcpu, (u64 __user *)NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) /* First give them all the invariant registers' indices. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) uindices++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) err = walk_sys_regs(vcpu, uindices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) uindices += err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) return write_demux_regids(uindices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) }
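
/*
 * Userspace drives the two functions above through KVM_GET_REG_LIST; a
 * hypothetical sketch (vcpu_fd assumed, error handling elided):
 *
 *	struct kvm_reg_list probe = { .n = 0 };
 *	struct kvm_reg_list *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);
 *	(expected to fail with E2BIG, setting probe.n to the register count)
 *
 *	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
 *	(fills list->reg[] with list->n indices)
 */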
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) void kvm_sys_reg_table_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) struct sys_reg_desc clidr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) /* Make sure tables are unique and in order. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) /* We abuse the reset function to overwrite the table itself. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) * If software reads the Cache Type fields from Ctype1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) * upwards, once it has seen a value of 0b000, no caches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) * exist at further-out levels of the hierarchy. So, for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) * example, if Ctype3 is the first Cache Type field with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) * value of 0b000, the values of Ctype4 to Ctype7 must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) * ignored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) get_clidr_el1(NULL, &clidr); /* Ugly... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) cache_levels = clidr.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) for (i = 0; i < 7; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) if (((cache_levels >> (i*3)) & 7) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) /* Clear all higher bits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) cache_levels &= (1 << (i*3))-1;
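/*
 * Example: with Ctype1 == 0b011 (separate), Ctype2 == 0b100 (unified)
 * and Ctype3 == 0b000, the loop above breaks at i == 2 and the mask
 * keeps bits [5:0] only, leaving cache_levels == 0b100011.
 */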
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) }