Orange Pi5 kernel

Deprecated (no longer maintained) Linux kernel 5.10.110 for the Orange Pi 5, 5B, and 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright (C) 2012,2013 - ARM Ltd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * Author: Marc Zyngier <marc.zyngier@arm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Derived from arch/arm/kvm/handle_exit.c:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/kvm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/kvm_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <asm/esr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <asm/exception.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <asm/kvm_asm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <asm/kvm_emulate.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <asm/kvm_mmu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <asm/debug-monitors.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <asm/traps.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <kvm/arm_hypercalls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #define CREATE_TRACE_POINTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include "trace_handle_exit.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) typedef int (*exit_handle_fn)(struct kvm_vcpu *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 		kvm_inject_vabt(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) static int handle_hvc(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 			    kvm_vcpu_hvc_get_imm(vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	vcpu->stat.hvc_exit_stat++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	ret = kvm_hvc_call_handler(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 		vcpu_set_reg(vcpu, 0, ~0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 
/*
 * Handle a guest SMC. Per the ARM ARM, an SMC executed at Non-secure
 * EL1 and trapped to EL2 via HCR_EL2.TSC is a Trap exception, not a
 * Secure Monitor Call exception. We report failure (-1 in x0) and
 * must advance the PC past the SMC, as the trap would otherwise
 * return to the same instruction.
 */
static int handle_smc(struct kvm_vcpu *vcpu)
{
	kvm_incr_pc(vcpu);
	vcpu_set_reg(vcpu, 0, ~0UL);
	return 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68)  * Guest access to FP/ASIMD registers are routed to this handler only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69)  * when the system doesn't support FP/ASIMD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70)  */
/*
 * A guest FP/ASIMD access is only routed here when the system has no
 * FP/ASIMD support; all we can do is give the guest an UNDEF.
 */
static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78)  * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79)  *		    instruction executed by a guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81)  * @vcpu:	the vcpu pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83)  * WFE: Yield the CPU and come back to this vcpu when the scheduler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84)  * decides to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85)  * WFI: Simply call kvm_vcpu_block(), which will halt execution of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86)  * world-switches and schedule other host processes until there is an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87)  * incoming IRQ or FIQ to the VM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 		vcpu->stat.wfe_exit_stat++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 		vcpu->stat.wfi_exit_stat++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 		kvm_vcpu_block(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	kvm_incr_pc(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)  * kvm_handle_guest_debug - handle a debug exception instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)  * @vcpu:	the vcpu pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)  * We route all debug exceptions through the same handler. If both the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)  * guest and host are using the same debug facilities it will be up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)  * userspace to re-inject the correct exception for guest delivery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)  * @return: 0 (while setting vcpu->run->exit_reason), -1 for error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	struct kvm_run *run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	u32 esr = kvm_vcpu_get_esr(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	run->exit_reason = KVM_EXIT_DEBUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	run->debug.arch.hsr = esr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	switch (ESR_ELx_EC(esr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	case ESR_ELx_EC_WATCHPT_LOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 		run->debug.arch.far = vcpu->arch.fault.far_el2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	case ESR_ELx_EC_SOFTSTP_LOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	case ESR_ELx_EC_BREAKPT_LOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	case ESR_ELx_EC_BKPT32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	case ESR_ELx_EC_BRK64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 		kvm_err("%s: un-handled case esr: %#08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 			__func__, (unsigned int) esr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 		ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	u32 esr = kvm_vcpu_get_esr(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 		      esr, esr_get_class_string(esr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	kvm_inject_undefined(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 
/* SVE is not exposed to guests here: any access is UNDEFINED. */
static int handle_sve(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)  * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)  * a NOP). If we get here, it is that we didn't fixup ptrauth on exit, and all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)  * that we can do is give the guest an UNDEF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)  */
/*
 * A ptrauth instruction trapped from the guest (one its EL1 did not
 * turn into a NOP). Reaching this handler means ptrauth was not fixed
 * up on exit; the only remaining option is to UNDEF the guest.
 */
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 
/*
 * Dispatch table indexed by ESR_ELx exception class (EC). The first
 * entry fills every slot with the unknown-EC handler; the designated
 * initializers that follow override the classes we handle explicitly.
 */
static exit_handle_fn arm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
	[ESR_ELx_EC_CP14_MR]	= kvm_handle_cp14_32,
	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
	[ESR_ELx_EC_HVC32]	= handle_hvc,
	[ESR_ELx_EC_SMC32]	= handle_smc,
	[ESR_ELx_EC_HVC64]	= handle_hvc,
	[ESR_ELx_EC_SMC64]	= handle_smc,
	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
	[ESR_ELx_EC_SVE]	= handle_sve,
	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
	[ESR_ELx_EC_PAC]	= kvm_handle_ptrauth,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 	u32 esr = kvm_vcpu_get_esr(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	u8 esr_ec = ESR_ELx_EC(esr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	return arm_exit_handlers[esr_ec];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)  * We may be single-stepping an emulated instruction. If the emulation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)  * has been completed in the kernel, we can return to userspace with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)  * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)  * emulation first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	int handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 	 * See ARM ARM B1.14.1: "Hyp traps on instructions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	 * that fail their condition code check"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	if (!kvm_condition_valid(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 		kvm_incr_pc(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 		handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 		exit_handle_fn exit_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 		exit_handler = kvm_get_exit_handler(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 		handled = exit_handler(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	return handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)  * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)  * proper exit to userspace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	struct kvm_run *run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	exception_index = ARM_EXCEPTION_CODE(exception_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 	switch (exception_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	case ARM_EXCEPTION_IRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	case ARM_EXCEPTION_EL1_SERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	case ARM_EXCEPTION_TRAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 		return handle_trap_exceptions(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 	case ARM_EXCEPTION_HYP_GONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 		 * EL2 has been reset to the hyp-stub. This happens when a guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 		 * is pre-empted by kvm_reboot()'s shutdown call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 	case ARM_EXCEPTION_IL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 		 * We attempted an illegal exception return.  Guest state must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 		 * have been corrupted somehow.  Give up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 		kvm_pr_unimpl("Unsupported exception type: %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 			      exception_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) /* For exit types that need handling before we can be preempted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 	if (ARM_SERROR_PENDING(exception_index)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 			u64 disr = kvm_vcpu_get_disr(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 			kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 			kvm_inject_vabt(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 	exception_index = ARM_EXCEPTION_CODE(exception_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) }