Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug and Guest Debug support
 *
 * Copyright (C) 2015 - Linaro Ltd
 * Author: Alex Bennée <alex.bennee@linaro.org>
 */

#include <linux/kvm_host.h>
#include <linux/hw_breakpoint.h>

#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

/* These are the bits of MDSCR_EL1 we may manipulate */
#define MDSCR_EL1_DEBUG_MASK	(DBG_MDSCR_SS | \
				DBG_MDSCR_KDE | \
				DBG_MDSCR_MDE)

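/*
 * Per-CPU copy of the host's MDCR_EL2, read at hyp initialisation so that
 * the bootcode-provided MDCR_EL2.HPMN field can be preserved when a vcpu's
 * mdcr_el2 value is built.
 */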
static DEFINE_PER_CPU(u32, mdcr_el2);

/**
 * save/restore_guest_debug_regs
 *
 * For some debug operations we need to tweak some guest registers. As
 * a result we need to save the state of those registers before we
 * make those modifications.
 *
 * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
 * after we have restored the preserved value to the main context.
 */
static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

	vcpu->arch.guest_debug_preserved.mdscr_el1 = val;

	trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
				vcpu->arch.guest_debug_preserved.mdscr_el1);
}

static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;

	vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);

	trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
				vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}

/**
 * kvm_arm_init_debug - grab what we need for debug
 *
 * Currently the sole task of this function is to retrieve the initial
 * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has
 * presumably been set up by some knowledgeable bootcode.
 *
 * It is called once per-cpu during CPU hyp initialisation.
 */

void kvm_arm_init_debug(void)
{
	__this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
}

/**
 * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
 *
 * @vcpu:	the vcpu pointer
 *
 * This ensures we will trap access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
 */
static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
	/*
	 * This also clears MDCR_EL2_E2PB_MASK to disable guest access
	 * to the profiling buffer.
	 */
	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
				MDCR_EL2_TPMS |
				MDCR_EL2_TTRF |
				MDCR_EL2_TPMCR |
				MDCR_EL2_TDRA |
				MDCR_EL2_TDOSA);

	/* Is the VM being debugged by userspace? */
	if (vcpu->guest_debug)
		/* Route all software debug exceptions to EL2 */
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;

	/*
	 * Trap debug register access when one of the following is true:
	 *  - Userspace is using the hardware to debug the guest
	 *  (KVM_GUESTDBG_USE_HW is set).
	 *  - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear).
	 */
	if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
	    !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;

	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
}

/**
 * kvm_arm_vcpu_init_debug - setup vcpu debug traps
 *
 * @vcpu:	the vcpu pointer
 *
 * Set vcpu initial mdcr_el2 value.
 */
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arm_setup_mdcr_el2(vcpu);
	preempt_enable();
}

/**
 * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
 *
 * @vcpu:	the vcpu pointer
 */

void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
}

/**
 * kvm_arm_setup_debug - set up debug-related stuff
 *
 * @vcpu:	the vcpu pointer
 *
 * This is called before each entry into the hypervisor to set up any
 * debug-related registers. Currently this just ensures we will trap
 * access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *  - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
 *
 * Additionally, KVM only traps guest accesses to the debug registers if
 * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
 * flag in vcpu->arch.flags). Since the guest must not interfere with
 * the hardware state while userspace is debugging it with the debug
 * registers, we must ensure that trapping is enabled whenever we are
 * debugging the guest using the debug registers.
 */

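/*
 * For reference, the vcpu->guest_debug flags consumed below are set from
 * userspace through the KVM_SET_GUEST_DEBUG ioctl on the vcpu fd. A minimal
 * sketch (assuming vcpu_fd was obtained via KVM_CREATE_VCPU; error handling
 * and the subsequent KVM_RUN/KVM_EXIT_DEBUG handling are omitted):
 *
 *	struct kvm_guest_debug dbg = { };
 *
 *	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
 *	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
 *		perror("KVM_SET_GUEST_DEBUG");
 *
 * Hardware breakpoints/watchpoints are requested the same way with
 * KVM_GUESTDBG_USE_HW and the register values filled into dbg.arch.
 */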
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
	unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;

	trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);

	kvm_arm_setup_mdcr_el2(vcpu);

	/* Is Guest debugging in effect? */
	if (vcpu->guest_debug) {
		/* Save guest debug state */
		save_guest_debug_regs(vcpu);

		/*
		 * Single Step (ARM ARM D2.12.3 The software step state
		 * machine)
		 *
		 * If we are doing Single Step we need to manipulate
		 * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
		 * step has occurred the hypervisor will trap the
		 * debug exception and we return to userspace.
		 *
		 * If the guest attempts to single step its userspace
		 * we would have to deal with a trapped exception
		 * while in the guest kernel. Because this would be
		 * hard to unwind we suppress the guest's ability to
		 * do so by masking MDSCR_EL1.SS.
		 *
		 * This confuses guest debuggers which use
		 * single-step behind the scenes, but everything
		 * returns to normal once the host is no longer
		 * debugging the system.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		} else {
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr &= ~DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		}

		trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));

		/*
		 * HW Breakpoints and watchpoints
		 *
		 * We simply switch the debug_ptr to point to our new
		 * external_debug_state which has been populated by the
		 * debug ioctl. The existing KVM_ARM64_DEBUG_DIRTY
		 * mechanism ensures the registers are updated on the
		 * world switch.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			/* Enable breakpoints/watchpoints */
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_MDE;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);

			vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
			vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						&vcpu->arch.debug_ptr->dbg_bcr[0],
						&vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						&vcpu->arch.debug_ptr->dbg_wcr[0],
						&vcpu->arch.debug_ptr->dbg_wvr[0]);
		}
	}

	BUG_ON(!vcpu->guest_debug &&
		vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);

	/* If KDE or MDE are set, perform a full save/restore cycle. */
	if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;

	/* Write mdcr_el2 changes since vcpu_load on VHE systems */
	if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}

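/**
 * kvm_arm_clear_debug - restore the guest's debug state
 *
 * @vcpu:	the vcpu pointer
 *
 * If userspace was debugging the guest, undo what kvm_arm_setup_debug()
 * did: restore the preserved MDSCR_EL1 value and, when hardware-assisted
 * debug was in use, point debug_ptr back at the vcpu's own debug state.
 */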
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
{
	trace_kvm_arm_clear_debug(vcpu->guest_debug);

	if (vcpu->guest_debug) {
		restore_guest_debug_regs(vcpu);

		/*
		 * If we were using HW debug we need to restore the
		 * debug_ptr to the guest debug state.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			kvm_arm_reset_debug_ptr(vcpu);

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						&vcpu->arch.debug_ptr->dbg_bcr[0],
						&vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						&vcpu->arch.debug_ptr->dbg_wcr[0],
						&vcpu->arch.debug_ptr->dbg_wvr[0]);
		}
	}
}

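/*
 * On non-VHE systems, check at vcpu load time whether SPE or TRBE is
 * implemented and usable by the host on this CPU; if so, flag that its
 * context must be saved around guest entry.
 */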
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
{
	u64 dfr0;

	/* For VHE, there is nothing to do */
	if (has_vhe())
		return;

	dfr0 = read_sysreg(id_aa64dfr0_el1);
	/*
	 * If SPE is present on this CPU and is available at current EL,
	 * we may need to check if the host state needs to be saved.
	 */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT) &&
	    !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
		vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_SPE;

	/* Check if we have TRBE implemented and available at the host */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRBE_SHIFT) &&
	    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
		vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_TRBE;
}

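/* Drop the SPE/TRBE save flags again when the vcpu is put. */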
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu)
{
	vcpu->arch.flags &= ~(KVM_ARM64_DEBUG_STATE_SAVE_SPE |
			      KVM_ARM64_DEBUG_STATE_SAVE_TRBE);
}