^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Fault injection for both 32 and 64bit guests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2012,2013 - ARM Ltd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Author: Marc Zyngier <marc.zyngier@arm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Based on arch/arm/kvm/emulate.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Copyright (C) 2012 - Virtual Open Systems and Columbia University
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Author: Christoffer Dall <c.dall@virtualopensystems.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/kvm_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <asm/kvm_emulate.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <asm/esr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
/*
 * Pend a synchronous external abort ({i,d}abort) targeting AArch64 EL1.
 *
 * @vcpu:    the vCPU to receive the abort
 * @is_iabt: true for an instruction abort, false for a data abort
 * @addr:    faulting address, reported in FAR_EL1
 *
 * Only the ESR_EL1/FAR_EL1 values and the pending-exception flags are
 * set here; the actual PSTATE/PC switch happens on guest entry.
 */
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u32 esr = 0;

	/* Queue a synchronous exception taken to AArch64 EL1 */
	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
			     KVM_ARM64_PENDING_EXCEPTION);

	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

	/*
	 * The EC encodings differ only in one bit between the IABT and
	 * DABT variants (IABT_LOW=0x20, IABT_CUR=0x21, DABT_LOW=0x24,
	 * DABT_CUR=0x25), so OR-ing DABT_LOW on top of the IABT_{LOW,CUR}
	 * value chosen above converts it to DABT_{LOW,CUR} while
	 * preserving the LOW/CUR distinction.
	 */
	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	/* Fault status: synchronous external abort */
	vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) static void inject_undef64(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) KVM_ARM64_PENDING_EXCEPTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * Build an unknown exception, depending on the instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) if (kvm_vcpu_trap_il_is32bit(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) esr |= ESR_ELx_IL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define DFSR_FSC_EXTABT_LPAE 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define DFSR_FSC_EXTABT_nLPAE 0x08
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #define DFSR_LPAE BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #define TTBCR_EAE BIT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) static void inject_undef32(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_UND |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) KVM_ARM64_PENDING_EXCEPTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) * Modelled after TakeDataAbortException() and TakePrefetchAbortException
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * pseudocode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) */
/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException
 * pseudocode.
 *
 * Pend an AArch32 {prefetch,data} abort for the guest:
 * @is_pabt selects prefetch (instruction) vs data abort, @addr is the
 * 32bit faulting address.
 *
 * The AArch32 fault registers are backed by their AArch64 counterparts:
 * DFAR lives in FAR_EL1[31:0] and IFAR in FAR_EL1[63:32], so only the
 * relevant half of FAR_EL1 is updated below. Likewise the DFSR is
 * backed by ESR_EL1, while the IFSR has its own dedicated IFSR32_EL2.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
{
	u64 far;
	u32 fsr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
		/* LPAE format: long-descriptor FSR with external abort FSC */
		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
	} else {
		/* no need to shuffle FS[4] into DFSR[10] as its 0 */
		fsr = DFSR_FSC_EXTABT_nLPAE;
	}

	far = vcpu_read_sys_reg(vcpu, FAR_EL1);

	if (is_pabt) {
		vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_IABT |
				     KVM_ARM64_PENDING_EXCEPTION);
		/* Update the IFAR half (FAR_EL1[63:32]), preserve the DFAR */
		far &= GENMASK(31, 0);
		far |= (u64)addr << 32;
		vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
	} else { /* !iabt */
		vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_DABT |
				     KVM_ARM64_PENDING_EXCEPTION);
		/* Update the DFAR half (FAR_EL1[31:0]), preserve the IFAR */
		far &= GENMASK(63, 32);
		far |= addr;
		vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
	}

	vcpu_write_sys_reg(vcpu, far, FAR_EL1);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) * kvm_inject_dabt - inject a data abort into the guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) * @vcpu: The VCPU to receive the data abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) * @addr: The address to report in the DFAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) * It is assumed that this code is called from the VCPU thread and that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) * VCPU therefore is not currently executing guest code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) if (vcpu_el1_is_32bit(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) inject_abt32(vcpu, false, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) inject_abt64(vcpu, false, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) * kvm_inject_pabt - inject a prefetch abort into the guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) * It is assumed that this code is called from the VCPU thread and that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) * VCPU therefore is not currently executing guest code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) if (vcpu_el1_is_32bit(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) inject_abt32(vcpu, true, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) inject_abt64(vcpu, true, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) * kvm_inject_undefined - inject an undefined instruction into the guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) * @vcpu: The vCPU in which to inject the exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) * It is assumed that this code is called from the VCPU thread and that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) * VCPU therefore is not currently executing guest code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	/* Dispatch on the register width of the guest's EL1 */
	if (!vcpu_el1_is_32bit(vcpu))
		inject_undef64(vcpu);
	else
		inject_undef32(vcpu);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
/*
 * Set the vSError syndrome and make a virtual SError pending for the
 * guest. Only the ISS field of @esr is honoured; the rest is masked
 * off before being latched into VSESR. Setting HCR_VSE makes the
 * hardware deliver the virtual SError to the guest.
 */
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
{
	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
	*vcpu_hcr(vcpu) |= HCR_VSE;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) * kvm_inject_vabt - inject an async abort / SError into the guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) * @vcpu: The VCPU to receive the exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) * It is assumed that this code is called from the VCPU thread and that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) * VCPU therefore is not currently executing guest code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) * the remaining ISS all-zeros so that this error is not interpreted as an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) * value, so the CPU generates an imp-def value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	/*
	 * NOTE(review): ESR_ELx_ISV appears to occupy the same bit
	 * position as the SError IDS field, so passing it here marks
	 * the syndrome as imp-def with an all-zero remaining ISS (see
	 * the comment above) — confirm against the ARM ARM ESR layout.
	 */
	kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
}