// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

unsigned long kvmppc_booke_handlers;

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("mmio", mmio_exits),
	VCPU_STAT("sig", signal_exits),
	VCPU_STAT("itlb_r", itlb_real_miss_exits),
	VCPU_STAT("itlb_v", itlb_virt_miss_exits),
	VCPU_STAT("dtlb_r", dtlb_real_miss_exits),
	VCPU_STAT("dtlb_v", dtlb_virt_miss_exits),
	VCPU_STAT("sysc", syscall_exits),
	VCPU_STAT("isi", isi_exits),
	VCPU_STAT("dsi", dsi_exits),
	VCPU_STAT("inst_emu", emulated_inst_exits),
	VCPU_STAT("dec", dec_exits),
	VCPU_STAT("ext_intr", ext_intr_exits),
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("doorbell", dbell_exits),
	VCPU_STAT("guest doorbell", gdbell_exits),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	VM_STAT("remote_tlb_flush", remote_tlb_flush),
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip,
	       vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link,
	       vcpu->arch.regs.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	disable_kernel_spe();
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	disable_kernel_spe();
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Load up guest vcpu FP state if it's needed.
 * It also sets MSR_FP in the thread so that the host knows
 * we're holding the FPU, letting the host save the guest vcpu's
 * FP state if another thread needs to use the FPU.
 * This simulates an FP unavailable fault.
 *
 * Must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (!(current->thread.regs->msr & MSR_FP)) {
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		current->thread.fp_save_area = &vcpu->arch.fp;
		current->thread.regs->msr |= MSR_FP;
	}
#endif
}

/*
 * Save the guest vcpu FP state into the thread.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	current->thread.fp_save_area = NULL;
#endif
}

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so we only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

/*
 * Simulate an AltiVec unavailable fault to load guest state
 * from the thread into the AltiVec unit.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (!(current->thread.regs->msr & MSR_VEC)) {
			enable_kernel_altivec();
			load_vr_state(&vcpu->arch.vr);
			disable_kernel_altivec();
			current->thread.vr_save_area = &vcpu->arch.vr;
			current->thread.regs->msr |= MSR_VEC;
		}
	}
#endif
}

/*
 * Save the guest vcpu AltiVec state into the thread.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		current->thread.vr_save_area = NULL;
	}
#endif
}

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr &= ~MSR_DE;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

	/* Force enable debug interrupts when user space wants to debug */
	if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
		/*
		 * Since there is no shadow MSR, sync MSR_DE into the guest
		 * visible MSR.
		 */
		vcpu->arch.shared->msr |= MSR_DE;
#else
		vcpu->arch.shadow_msr |= MSR_DE;
		vcpu->arch.shared->msr &= ~MSR_DE;
#endif
	}
}

/*
 * Helper function for "full" MSR writes. No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
	kvmppc_vcpu_sync_debug(vcpu);
}

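/*
 * Usage sketch for kvmppc_set_msr() above (hypothetical caller, not part
 * of the original file): an emulated "mtmsr rS" is expected to funnel
 * through kvmppc_set_msr() rather than writing vcpu->arch.shared->msr
 * directly, so that the MMU/SPE/FPU/debug sync hooks all fire, e.g.:
 *
 *	kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
 */
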
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

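/*
 * Note on the queueing scheme (descriptive, derived from this file):
 * pending_exceptions is a single unsigned long used as a priority bitmap,
 * where a lower bit index means a higher delivery priority. The queue and
 * dequeue helpers below therefore reduce to atomic bit ops, and delivery
 * scans the bitmap with __ffs()/find_next_bit() (see
 * kvmppc_core_check_exceptions()), e.g.:
 *
 *	set_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
 *	priority = __ffs(vcpu->arch.pending_exceptions);
 */
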
void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
				 ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
				    ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
}

#ifdef CONFIG_ALTIVEC
void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
}
#endif

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
}

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
}

static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	kvmppc_set_srr0(vcpu, srr0);
	kvmppc_set_srr1(vcpu, srr1);
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

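/*
 * Summary of the helpers above (descriptive note): each interrupt class
 * banks the interrupted PC/MSR into its own save/restore register pair:
 *
 *	noncritical   -> SRR0/SRR1
 *	critical      -> CSRR0/CSRR1
 *	debug         -> DSRR0/DSRR1 (CSRR0/CSRR1 without CPU_FTR_DEBUG_LVL_EXC)
 *	machine check -> MCSRR0/MCSRR1
 */
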
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		fallthrough;
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		fallthrough;
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
#ifdef CONFIG_SPE_POSSIBLE
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
#endif
#ifdef CONFIG_ALTIVEC
	case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
	case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
#endif
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		fallthrough;
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
			int_class = INT_CLASS_DBG;
		else
			int_class = INT_CLASS_CRIT;

		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.regs.nip,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.regs.nip,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.regs.nip,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.regs.nip = vcpu->arch.ivpr |
				      vcpu->arch.ivor[priority];
		if (update_esr)
			kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear)
			kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
		if (update_epr) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}

/*
 * Return the number of jiffies until the next timeout. If the timeout is
 * longer than NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA,
 * because a larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog period toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}

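/*
 * Worked example for watchdog_next_timeout() above (illustrative numbers,
 * not from the original file): with period = 30, wdt_tb = 1ULL << 33 =
 * 0x2_0000_0000. For tb = 0x2_5000_0000 the watched bit is already 1, so
 * it must first fall back to 0 before the next 0->1 toggle:
 *
 *	wdt_ticks  = wdt_tb;                       // 0x2_0000_0000
 *	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1)); // += 0x1_B000_0000
 *	                                           // total 0x3_B000_0000
 *
 * and indeed bit 33 next rises at tb = 0x6_0000_0000, which is exactly
 * 0x3_B000_0000 ticks away.
 */
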
static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then no need to exit to
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies until the watchdog timeout is
	 * >= NEXT_TIMER_MAX_DELTA, do not run the watchdog timer,
	 * as this can break the timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

void kvmppc_watchdog_func(struct timer_list *t)
{
	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is the final watchdog expiry and some action is required,
	 * exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after the final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * The timer will resume the next time TSR/TCR is updated.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}

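/*
 * State walk-through for kvmppc_watchdog_func() above (descriptive note):
 * successive expiries advance the TSR watchdog bits (ENW, WIS) as
 * (0,0) -> (1,0) -> (1,1); an expiry that finds both bits already set is
 * the "final" one that may exit to userspace. The cmpxchg() loop performs
 * this update locklessly, retrying if the guest changed TSR concurrently.
 */
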
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}

static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (kvm_request_pending(vcpu)) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		hard_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}

int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
	int ret, s;
	struct debug_reg debug;

	if (!vcpu->arch.sane) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		ret = s;
		goto out;
	}
	/* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
	/* Save the userspace FPU state */
	enable_kernel_fp();

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.
	 */
	kvmppc_load_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	/* Save the userspace AltiVec state */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		enable_kernel_altivec();
	/*
	 * Since we can't trap on MSR_VEC in GS-mode, we consider the guest
	 * as always using AltiVec.
	 */
	kvmppc_load_guest_altivec(vcpu);
#endif

	/* Switch to guest debug context */
	debug = vcpu->arch.dbg_reg;
	switch_booke_debug_regs(&debug);
	debug = current->thread.debug;
	current->thread.debug = vcpu->arch.dbg_reg;

	vcpu->arch.pgdir = vcpu->kvm->mm->pgd;
	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(vcpu);

	/*
	 * No need for guest_exit. It's done in handle_exit.
	 * We also get here with interrupts enabled.
	 */

	/* Switch back to user space debug context */
	switch_booke_debug_regs(&debug);
	current->thread.debug = debug;

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	kvmppc_save_guest_altivec(vcpu);
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

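/*
 * Call-order sketch for kvmppc_vcpu_run() above (descriptive note, not
 * from the original file): the entry sequence is, in order,
 *
 *	kvmppc_prepare_to_enter(vcpu);    // deliver/queue pending irqs
 *	kvmppc_load_guest_fp(vcpu);       // lazy FP handoff
 *	kvmppc_load_guest_altivec(vcpu);  // lazy AltiVec handoff
 *	switch_booke_debug_regs(&debug);  // guest debug context
 *	__kvmppc_vcpu_run(vcpu);          // low-level guest entry/exit
 *
 * and the mirror-image restore runs after exit.
 */
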
static int emulation_exit(struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
		vcpu->run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
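/*
 * Handle a debug exit.  If userspace is not debugging the guest
 * (vcpu->guest_debug == 0), the event is reflected back to the guest as
 * a debug or program interrupt (a purely imprecise event is dropped);
 * otherwise the DBSR bits are decoded into run->debug for userspace.
 */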
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct kvm_run *run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) u32 dbsr = vcpu->arch.dbsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (vcpu->guest_debug == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * Debug resources belong to the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * Imprecise debug events are not injected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (dbsr & DBSR_IDE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) dbsr &= ~DBSR_IDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (!dbsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) return RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) kvmppc_core_queue_debug(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) /* Inject a program interrupt if trap debug is not allowed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) kvmppc_core_queue_program(vcpu, ESR_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * Debug resources are owned by userspace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * Clear the guest dbsr (vcpu->arch.dbsr).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) vcpu->arch.dbsr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) run->debug.arch.status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) run->debug.arch.address = vcpu->arch.regs.nip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) run->debug.arch.address = dbg_reg->dac1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) run->debug.arch.address = dbg_reg->dac2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
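/*
 * Synthesize a minimal pt_regs from the current context: the stack
 * pointer (r1), link register, MSR, and the address of the instruction
 * after the "bl" below.  This lets host exception handlers be called
 * as if from a real interrupt frame.
 */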
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) static void kvmppc_fill_pt_regs(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) ulong r1, ip, msr, lr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) asm("mr %0, 1" : "=r"(r1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) asm("mflr %0" : "=r"(lr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) asm("mfmsr %0" : "=r"(msr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) asm("bl 1f; 1: mflr %0" : "=r"(ip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) memset(regs, 0, sizeof(*regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) regs->gpr[1] = r1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) regs->nip = ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) regs->msr = msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) regs->link = lr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * For interrupts that need to be handled by host interrupt handlers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * the corresponding host handler is called from here in a similar way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * (though not exactly) to how it is called from the low-level handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * (such as from arch/powerpc/kernel/head_fsl_booke.S).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) unsigned int exit_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) struct pt_regs regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) switch (exit_nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) case BOOKE_INTERRUPT_EXTERNAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) kvmppc_fill_pt_regs(&regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) do_IRQ(&regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) case BOOKE_INTERRUPT_DECREMENTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) kvmppc_fill_pt_regs(&regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) timer_interrupt(&regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) #if defined(CONFIG_PPC_DOORBELL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) case BOOKE_INTERRUPT_DOORBELL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) kvmppc_fill_pt_regs(&regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) doorbell_exception(&regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) case BOOKE_INTERRUPT_MACHINE_CHECK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) /* FIXME */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) kvmppc_fill_pt_regs(&regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) performance_monitor_exception(&regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) case BOOKE_INTERRUPT_WATCHDOG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) kvmppc_fill_pt_regs(&regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) #ifdef CONFIG_BOOKE_WDT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) WatchdogException(&regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) unknown_exception(&regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) case BOOKE_INTERRUPT_CRITICAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) kvmppc_fill_pt_regs(&regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) unknown_exception(&regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) case BOOKE_INTERRUPT_DEBUG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) /* Save DBSR before preemption is enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) vcpu->arch.dbsr = mfspr(SPRN_DBSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) kvmppc_clear_dbsr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
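/*
 * Translate a failed guest instruction fetch into a RESUME_* action:
 * retry the guest on EMULATE_AGAIN, or report the failure to userspace
 * and queue a program check on EMULATE_FAIL.
 */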
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) enum emulation_result emulated, u32 last_inst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) switch (emulated) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) case EMULATE_AGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) case EMULATE_FAIL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) pr_debug("%s: load instruction from guest address %lx failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) __func__, vcpu->arch.regs.nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) /* For debugging, encode the failing instruction and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * report it to userspace. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) vcpu->run->hw.hardware_exit_reason |= last_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) kvmppc_core_queue_program(vcpu, ESR_PIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) return RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * kvmppc_handle_exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) struct kvm_run *run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) int r = RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) int s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) u32 last_inst = KVM_INST_FETCH_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) enum emulation_result emulated = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /* update timing stats before last_exit_type is overwritten */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) kvmppc_update_timing_stats(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) /* restart interrupts if they were meant for the host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) kvmppc_restart_interrupt(vcpu, exit_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * get last instruction before being preempted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) switch (exit_nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) case BOOKE_INTERRUPT_DATA_STORAGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) case BOOKE_INTERRUPT_DTLB_MISS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) case BOOKE_INTERRUPT_HV_PRIV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) case BOOKE_INTERRUPT_PROGRAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) /* SW breakpoints arrive as illegal instructions on HV */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) trace_kvm_exit(exit_nr, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) guest_exit_irqoff();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) run->exit_reason = KVM_EXIT_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) run->ready_for_interrupt_injection = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (emulated != EMULATE_DONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) r = kvmppc_resume_inst_load(vcpu, emulated, last_inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) switch (exit_nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) case BOOKE_INTERRUPT_MACHINE_CHECK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) kvmppc_dump_vcpu(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /* For debugging, send invalid exit reason to user space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) run->hw.hardware_exit_reason = ~1ULL << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) r = RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) case BOOKE_INTERRUPT_EXTERNAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) case BOOKE_INTERRUPT_DECREMENTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) kvmppc_account_exit(vcpu, DEC_EXITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) case BOOKE_INTERRUPT_WATCHDOG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) case BOOKE_INTERRUPT_DOORBELL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) kvmppc_account_exit(vcpu, DBELL_EXITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) kvmppc_account_exit(vcpu, GDBELL_EXITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * We are here because there is a pending guest interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * which could not be delivered as MSR_CE or MSR_ME was not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * set. Once we break from here we will retry delivery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) case BOOKE_INTERRUPT_GUEST_DBELL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) kvmppc_account_exit(vcpu, GDBELL_EXITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * We are here because there is a pending guest interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * which could not be delivered as MSR_EE was not set. Once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * we break from here we will retry delivery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) case BOOKE_INTERRUPT_HV_PRIV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) r = emulation_exit(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) case BOOKE_INTERRUPT_PROGRAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) (last_inst == KVMPPC_INST_SW_BREAKPOINT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * We are here because of an SW breakpoint instr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * so let's return to the host to handle it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) r = kvmppc_handle_debug(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) run->exit_reason = KVM_EXIT_DEBUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) kvmppc_account_exit(vcpu, DEBUG_EXITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * Program traps generated by user-level software must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * be handled by the guest kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * In GS mode, hypervisor privileged instructions trap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * actual program interrupts, handled by the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) kvmppc_account_exit(vcpu, USR_PR_INST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) r = emulation_exit(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) case BOOKE_INTERRUPT_FP_UNAVAIL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) kvmppc_account_exit(vcpu, FP_UNAVAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) #ifdef CONFIG_SPE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) case BOOKE_INTERRUPT_SPE_UNAVAIL: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (vcpu->arch.shared->msr & MSR_SPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) kvmppc_vcpu_enable_spe(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) kvmppc_booke_queue_irqprio(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) BOOKE_IRQPRIO_SPE_UNAVAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) case BOOKE_INTERRUPT_SPE_FP_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) case BOOKE_INTERRUPT_SPE_FP_ROUND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) #elif defined(CONFIG_SPE_POSSIBLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) case BOOKE_INTERRUPT_SPE_UNAVAIL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * Guest wants SPE, but host kernel doesn't support it. Send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * an "unimplemented operation" program check to the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * These really should never happen without CONFIG_SPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * as we should never enable the real MSR[SPE] in the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) case BOOKE_INTERRUPT_SPE_FP_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) case BOOKE_INTERRUPT_SPE_FP_ROUND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) __func__, exit_nr, vcpu->arch.regs.nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) run->hw.hardware_exit_reason = exit_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) r = RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) #endif /* CONFIG_SPE_POSSIBLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) * On cores with the Vector category, KVM is loaded only if CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * is set; see kvmppc_core_check_processor_compat().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) #ifdef CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) case BOOKE_INTERRUPT_DATA_STORAGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) vcpu->arch.fault_esr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) kvmppc_account_exit(vcpu, DSI_EXITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) case BOOKE_INTERRUPT_INST_STORAGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) kvmppc_account_exit(vcpu, ISI_EXITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) case BOOKE_INTERRUPT_ALIGNMENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) vcpu->arch.fault_esr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) #ifdef CONFIG_KVM_BOOKE_HV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) case BOOKE_INTERRUPT_HV_SYSCALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (!(vcpu->arch.shared->msr & MSR_PR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * hcall from guest userspace -- send privileged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * instruction program check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) kvmppc_core_queue_program(vcpu, ESR_PPR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) case BOOKE_INTERRUPT_SYSCALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (!(vcpu->arch.shared->msr & MSR_PR) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) /* KVM PV hypercalls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) /* Guest syscalls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) kvmppc_account_exit(vcpu, SYSCALL_EXITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) case BOOKE_INTERRUPT_DTLB_MISS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) unsigned long eaddr = vcpu->arch.fault_dear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) int gtlb_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) gpa_t gpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) gfn_t gfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) #ifdef CONFIG_KVM_E500V2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (!(vcpu->arch.shared->msr & MSR_PR) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) kvmppc_map_magic(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) /* Check the guest TLB. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (gtlb_index < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) /* The guest didn't have a mapping for it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) kvmppc_core_queue_dtlb_miss(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) vcpu->arch.fault_dear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) vcpu->arch.fault_esr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) kvmppc_mmu_dtlb_miss(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) idx = srcu_read_lock(&vcpu->kvm->srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) gfn = gpaddr >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) /* The guest TLB had a mapping, but the shadow TLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * didn't, and it is RAM. This could be because:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) * a) the entry is mapping the host kernel, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * b) the guest used a large mapping which we're faking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) * Either way, we need to satisfy the fault without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * invoking the guest. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) /* Guest has mapped and accessed a page which is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) * actually RAM. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) vcpu->arch.paddr_accessed = gpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) vcpu->arch.vaddr_accessed = eaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) r = kvmppc_emulate_mmio(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) kvmppc_account_exit(vcpu, MMIO_EXITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) srcu_read_unlock(&vcpu->kvm->srcu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) case BOOKE_INTERRUPT_ITLB_MISS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) unsigned long eaddr = vcpu->arch.regs.nip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) gpa_t gpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) gfn_t gfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) int gtlb_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /* Check the guest TLB. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) if (gtlb_index < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /* The guest didn't have a mapping for it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) kvmppc_mmu_itlb_miss(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) idx = srcu_read_lock(&vcpu->kvm->srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) gfn = gpaddr >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) /* The guest TLB had a mapping, but the shadow TLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) * didn't. This could be because:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * a) the entry is mapping the host kernel, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * b) the guest used a large mapping which we're faking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * Either way, we need to satisfy the fault without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) * invoking the guest. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) /* Guest mapped and leaped at non-RAM! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) srcu_read_unlock(&vcpu->kvm->srcu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) case BOOKE_INTERRUPT_DEBUG: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) r = kvmppc_handle_debug(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (r == RESUME_HOST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) run->exit_reason = KVM_EXIT_DEBUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) kvmppc_account_exit(vcpu, DEBUG_EXITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) printk(KERN_EMERG "exit_nr %u\n", exit_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * To avoid clobbering exit_reason, only check for signals if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * aren't already exiting to userspace for some other reason.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (!(r & RESUME_HOST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) s = kvmppc_prepare_to_enter(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (s <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) /* interrupts now hard-disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) kvmppc_fix_ee_before_entry();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) kvmppc_load_guest_fp(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) kvmppc_load_guest_altivec(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
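/*
 * Replace the entire TSR.  Re-arm the watchdog if the ENW/WIS bits
 * changed, then re-evaluate pending timer interrupts.
 */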
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) u32 old_tsr = vcpu->arch.tsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) vcpu->arch.tsr = new_tsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) arm_next_watchdog(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) update_timer_ints(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) /* set up the watchdog timer once */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) spin_lock_init(&vcpu->arch.wdt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) * Clear DBSR.MRR (write-one-to-clear) to avoid a guest debug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) * interrupt, as this bit is of host interest only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) mtspr(SPRN_DBSR, DBSR_MRR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) del_timer_sync(&vcpu->arch.wdt_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
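/* Copy the guest-visible register state into userspace's kvm_regs. */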
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) regs->pc = vcpu->arch.regs.nip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) regs->cr = kvmppc_get_cr(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) regs->ctr = vcpu->arch.regs.ctr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) regs->lr = vcpu->arch.regs.link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) regs->xer = kvmppc_get_xer(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) regs->msr = vcpu->arch.shared->msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) regs->srr0 = kvmppc_get_srr0(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) regs->srr1 = kvmppc_get_srr1(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) regs->pid = vcpu->arch.pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) regs->sprg0 = kvmppc_get_sprg0(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) regs->sprg1 = kvmppc_get_sprg1(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) regs->sprg2 = kvmppc_get_sprg2(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) regs->sprg3 = kvmppc_get_sprg3(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) regs->sprg4 = kvmppc_get_sprg4(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) regs->sprg5 = kvmppc_get_sprg5(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) regs->sprg6 = kvmppc_get_sprg6(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) regs->sprg7 = kvmppc_get_sprg7(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) vcpu->arch.regs.nip = regs->pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) kvmppc_set_cr(vcpu, regs->cr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) vcpu->arch.regs.ctr = regs->ctr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) vcpu->arch.regs.link = regs->lr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) kvmppc_set_xer(vcpu, regs->xer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) kvmppc_set_msr(vcpu, regs->msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) kvmppc_set_srr0(vcpu, regs->srr0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) kvmppc_set_srr1(vcpu, regs->srr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) kvmppc_set_pid(vcpu, regs->pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) kvmppc_set_sprg0(vcpu, regs->sprg0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) kvmppc_set_sprg1(vcpu, regs->sprg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) kvmppc_set_sprg2(vcpu, regs->sprg2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) kvmppc_set_sprg3(vcpu, regs->sprg3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) kvmppc_set_sprg4(vcpu, regs->sprg4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) kvmppc_set_sprg5(vcpu, regs->sprg5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) kvmppc_set_sprg6(vcpu, regs->sprg6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) kvmppc_set_sprg7(vcpu, regs->sprg7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
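/*
 * KVM_SREGS_E_BASE: registers common to all Book E guests.  DEC is
 * derived from a single timebase snapshot that is also returned to
 * userspace in sregs->u.e.tb.
 */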
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) static void get_sregs_base(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) struct kvm_sregs *sregs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) u64 tb = get_tb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) sregs->u.e.features |= KVM_SREGS_E_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) sregs->u.e.csrr0 = vcpu->arch.csrr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) sregs->u.e.csrr1 = vcpu->arch.csrr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) sregs->u.e.mcsr = vcpu->arch.mcsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) sregs->u.e.esr = kvmppc_get_esr(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) sregs->u.e.dear = kvmppc_get_dar(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) sregs->u.e.tsr = vcpu->arch.tsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) sregs->u.e.tcr = vcpu->arch.tcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) sregs->u.e.tb = tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) sregs->u.e.vrsave = vcpu->arch.vrsave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
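/*
 * Apply the KVM_SREGS_E_BASE set from userspace.  DEC and TSR are only
 * updated when the corresponding update_special flags are set.
 */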
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) static int set_sregs_base(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) struct kvm_sregs *sregs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) vcpu->arch.csrr0 = sregs->u.e.csrr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) vcpu->arch.csrr1 = sregs->u.e.csrr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) vcpu->arch.mcsr = sregs->u.e.mcsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) kvmppc_set_esr(vcpu, sregs->u.e.esr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) kvmppc_set_dar(vcpu, sregs->u.e.dear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) vcpu->arch.vrsave = sregs->u.e.vrsave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) vcpu->arch.dec = sregs->u.e.dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) kvmppc_emulate_dec(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) kvmppc_set_tsr(vcpu, sregs->u.e.tsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
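/* KVM_SREGS_E_ARCH206 register set: PIR, MCSRR0/1, DECAR and IVPR. */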
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) static void get_sregs_arch206(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) struct kvm_sregs *sregs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) sregs->u.e.features |= KVM_SREGS_E_ARCH206;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) sregs->u.e.pir = vcpu->vcpu_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) sregs->u.e.decar = vcpu->arch.decar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) sregs->u.e.ivpr = vcpu->arch.ivpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) static int set_sregs_arch206(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) struct kvm_sregs *sregs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) if (sregs->u.e.pir != vcpu->vcpu_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) vcpu->arch.decar = sregs->u.e.decar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) vcpu->arch.ivpr = sregs->u.e.ivpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
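/*
 * Expose IVOR0..IVOR15 through sregs->u.e.ivor_low[], indexed by
 * interrupt vector number.
 */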
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) sregs->u.e.features |= KVM_SREGS_E_IVOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
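/* Load IVOR0..IVOR15 from userspace when the IVOR feature flag is set. */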
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) struct kvm_sregs *sregs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) sregs->pvr = vcpu->arch.pvr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) get_sregs_base(vcpu, sregs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) get_sregs_arch206(vcpu, sregs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) struct kvm_sregs *sregs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) if (vcpu->arch.pvr != sregs->pvr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) ret = set_sregs_base(vcpu, sregs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) ret = set_sregs_arch206(vcpu, sregs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
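/*
 * Userspace reaches the two handlers above through the KVM_GET_SREGS and
 * KVM_SET_SREGS vcpu ioctls. A minimal read-modify-write sketch, with error
 * handling omitted and "vcpu_fd" assumed to be an open vcpu file descriptor
 * (the 0x0100 vector value is purely illustrative):
 *
 *	struct kvm_sregs sregs;
 *
 *	ioctl(vcpu_fd, KVM_GET_SREGS, &sregs);
 *	sregs.u.e.features |= KVM_SREGS_E_IVOR;
 *	sregs.u.e.ivor_low[0] = 0x0100;
 *	ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
 *
 * Note that kvm_arch_vcpu_ioctl_set_sregs() rejects any attempt to change
 * the PVR with -EINVAL.
 */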
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) union kvmppc_one_reg *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) switch (id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) case KVM_REG_PPC_IAC1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) *val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) case KVM_REG_PPC_IAC2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) *val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) #if CONFIG_PPC_ADV_DEBUG_IACS > 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) case KVM_REG_PPC_IAC3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) *val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) case KVM_REG_PPC_IAC4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) *val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) case KVM_REG_PPC_DAC1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) *val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) case KVM_REG_PPC_DAC2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) *val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) case KVM_REG_PPC_EPR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) u32 epr = kvmppc_get_epr(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) *val = get_reg_val(id, epr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) #if defined(CONFIG_64BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) case KVM_REG_PPC_EPCR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) *val = get_reg_val(id, vcpu->arch.epcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) case KVM_REG_PPC_TCR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) *val = get_reg_val(id, vcpu->arch.tcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) case KVM_REG_PPC_TSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) *val = get_reg_val(id, vcpu->arch.tsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) case KVM_REG_PPC_DEBUG_INST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) case KVM_REG_PPC_VRSAVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) *val = get_reg_val(id, vcpu->arch.vrsave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) union kvmppc_one_reg *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) switch (id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) case KVM_REG_PPC_IAC1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) case KVM_REG_PPC_IAC2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) #if CONFIG_PPC_ADV_DEBUG_IACS > 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) case KVM_REG_PPC_IAC3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) case KVM_REG_PPC_IAC4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) case KVM_REG_PPC_DAC1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) case KVM_REG_PPC_DAC2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) case KVM_REG_PPC_EPR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) u32 new_epr = set_reg_val(id, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) kvmppc_set_epr(vcpu, new_epr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) #if defined(CONFIG_64BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) case KVM_REG_PPC_EPCR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) u32 new_epcr = set_reg_val(id, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) kvmppc_set_epcr(vcpu, new_epcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) case KVM_REG_PPC_OR_TSR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) u32 tsr_bits = set_reg_val(id, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) kvmppc_set_tsr_bits(vcpu, tsr_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) case KVM_REG_PPC_CLEAR_TSR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) u32 tsr_bits = set_reg_val(id, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) kvmppc_clr_tsr_bits(vcpu, tsr_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) case KVM_REG_PPC_TSR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) u32 tsr = set_reg_val(id, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) kvmppc_set_tsr(vcpu, tsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) case KVM_REG_PPC_TCR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) u32 tcr = set_reg_val(id, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) kvmppc_set_tcr(vcpu, tcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) case KVM_REG_PPC_VRSAVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) vcpu->arch.vrsave = set_reg_val(id, *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) }
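/*
 * Both functions sit behind the generic KVM_GET_ONE_REG and KVM_SET_ONE_REG
 * ioctls, which move the value between userspace and the kvmppc_one_reg
 * union according to the size encoded in the register id. A hedged sketch of
 * reading the timer control register from userspace ("vcpu_fd" is again an
 * assumed vcpu file descriptor):
 *
 *	uint32_t tcr;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_TCR,
 *		.addr = (uintptr_t)&tcr,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */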
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) struct kvm_translation *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) r = kvmppc_core_vcpu_translate(vcpu, tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) int kvmppc_core_prepare_memory_region(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) struct kvm_memory_slot *memslot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) const struct kvm_userspace_memory_region *mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) enum kvm_mr_change change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) void kvmppc_core_commit_memory_region(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) const struct kvm_userspace_memory_region *mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) const struct kvm_memory_slot *old,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) const struct kvm_memory_slot *new,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) enum kvm_mr_change change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) #if defined(CONFIG_64BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) vcpu->arch.epcr = new_epcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) #ifdef CONFIG_KVM_BOOKE_HV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) if (vcpu->arch.epcr & SPRN_EPCR_ICM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
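/*
 * EPCR[ICM] is what makes the core take interrupts in 64-bit mode. Under
 * BookE-HV the bit the guest sets is mirrored into GICM in the shadow EPCR,
 * so interrupts directed at the guest honour the guest's choice while
 * host-directed interrupts keep the host's own ICM setting.
 */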
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) vcpu->arch.tcr = new_tcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) arm_next_watchdog(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) update_timer_ints(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) set_bits(tsr_bits, &vcpu->arch.tsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) kvm_vcpu_kick(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) clear_bits(tsr_bits, &vcpu->arch.tsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) * We may have stopped the watchdog due to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) * being stuck on final expiration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) if (tsr_bits & (TSR_ENW | TSR_WIS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) arm_next_watchdog(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) update_timer_ints(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) }
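/*
 * These two helpers back the KVM_REG_PPC_OR_TSR and KVM_REG_PPC_CLEAR_TSR
 * pseudo-registers handled in kvmppc_set_one_reg() above, giving userspace a
 * race-free way to flip individual TSR bits rather than read-modify-writing
 * the whole register. A sketch of acknowledging a pending watchdog interrupt
 * ("vcpu_fd" assumed as before):
 *
 *	uint32_t bits = TSR_WIS;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_CLEAR_TSR,
 *		.addr = (uintptr_t)&bits,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */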
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (vcpu->arch.tcr & TCR_ARE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) vcpu->arch.dec = vcpu->arch.decar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) kvmppc_emulate_dec(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) kvmppc_set_tsr_bits(vcpu, TSR_DIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }
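/*
 * With TCR[ARE] (auto-reload enable) set, a real decrementer reloads from
 * DECAR when it hits zero, so the function above re-seeds the emulated DEC
 * from vcpu->arch.decar before restarting it; either way TSR[DIS] is raised
 * to post the decrementer interrupt status.
 */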
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) uint64_t addr, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) switch (index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) dbg_reg->dbcr0 |= DBCR0_IAC1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) dbg_reg->iac1 = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) dbg_reg->dbcr0 |= DBCR0_IAC2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) dbg_reg->iac2 = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) #if CONFIG_PPC_ADV_DEBUG_IACS > 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) dbg_reg->dbcr0 |= DBCR0_IAC3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) dbg_reg->iac3 = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) dbg_reg->dbcr0 |= DBCR0_IAC4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) dbg_reg->iac4 = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) dbg_reg->dbcr0 |= DBCR0_IDM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) int type, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) switch (index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) if (type & KVMPPC_DEBUG_WATCH_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) dbg_reg->dbcr0 |= DBCR0_DAC1R;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) if (type & KVMPPC_DEBUG_WATCH_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) dbg_reg->dbcr0 |= DBCR0_DAC1W;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) dbg_reg->dac1 = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (type & KVMPPC_DEBUG_WATCH_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) dbg_reg->dbcr0 |= DBCR0_DAC2R;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) if (type & KVMPPC_DEBUG_WATCH_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) dbg_reg->dbcr0 |= DBCR0_DAC2W;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) dbg_reg->dac2 = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) dbg_reg->dbcr0 |= DBCR0_IDM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) }

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) /* XXX: Add similar MSR protection for BookE-PR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) #ifdef CONFIG_KVM_BOOKE_HV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) BUG_ON(prot_bitmap & ~(MSR_UCLE | MSR_DE | MSR_PMM));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) if (set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) if (prot_bitmap & MSR_UCLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) vcpu->arch.shadow_msrp |= MSRP_UCLEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) if (prot_bitmap & MSR_DE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) vcpu->arch.shadow_msrp |= MSRP_DEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) if (prot_bitmap & MSR_PMM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) vcpu->arch.shadow_msrp |= MSRP_PMMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) if (prot_bitmap & MSR_UCLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) if (prot_bitmap & MSR_DE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) vcpu->arch.shadow_msrp &= ~MSRP_DEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (prot_bitmap & MSR_PMM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) int gtlb_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) gpa_t gpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) #ifdef CONFIG_KVM_E500V2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (!(vcpu->arch.shared->msr & MSR_PR) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) pte->eaddr = eaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) (eaddr & ~PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) pte->vpage = eaddr >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) pte->may_read = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) pte->may_write = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) pte->may_execute = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) /* Check the guest TLB. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) switch (xlid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) case XLATE_INST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) case XLATE_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) /* Do we have a TLB entry at all? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) if (gtlb_index < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) pte->eaddr = eaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) pte->vpage = eaddr >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) /* XXX read permissions from the guest TLB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) pte->may_read = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) pte->may_write = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) pte->may_execute = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) }
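/*
 * A typical caller pattern, modelled on kvmppc_st()/kvmppc_ld() in powerpc.c
 * (sketch only, with error handling trimmed):
 *
 *	struct kvmppc_pte pte;
 *	int r;
 *
 *	r = kvmppc_xlate(vcpu, eaddr, XLATE_DATA, XLATE_WRITE, &pte);
 *	if (r)
 *		return r;
 *	if (!pte.may_write)
 *		return -EPERM;
 *
 * after which pte.raddr holds the guest-physical address to access.
 */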
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) struct kvm_guest_debug *dbg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) struct debug_reg *dbg_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) int n, b = 0, w = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) vcpu->arch.dbg_reg.dbcr0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) vcpu->guest_debug = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) kvm_guest_protect_msr(vcpu, MSR_DE, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) kvm_guest_protect_msr(vcpu, MSR_DE, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) vcpu->guest_debug = dbg->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) vcpu->arch.dbg_reg.dbcr0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) /* Code below handles only HW breakpoints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) dbg_reg = &(vcpu->arch.dbg_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) #ifdef CONFIG_KVM_BOOKE_HV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) dbg_reg->dbcr1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) dbg_reg->dbcr2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) * is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) DBCR1_IAC4US;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) uint64_t addr = dbg->arch.bp[n].addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) uint32_t type = dbg->arch.bp[n].type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) if (type == KVMPPC_DEBUG_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) if (type & ~(KVMPPC_DEBUG_WATCH_READ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) KVMPPC_DEBUG_WATCH_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) KVMPPC_DEBUG_BREAKPOINT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) if (type & KVMPPC_DEBUG_BREAKPOINT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) /* Setting H/W breakpoint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) /* Setting H/W watchpoint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) type, w++))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
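/*
 * For reference, a hedged sketch of how a debugger stub might arm a single
 * hardware breakpoint through this ioctl (the vcpu fd and guest address are
 * assumptions for illustration):
 *
 *	struct kvm_guest_debug dbg = { 0 };
 *
 *	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
 *	dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;
 *	dbg.arch.bp[0].addr = 0x10000000;
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 *
 * Unused bp[] slots are left as KVMPPC_DEBUG_NONE (zero) and skipped by the
 * loop above.
 */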
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) vcpu->cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) current->thread.kvm_vcpu = vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) current->thread.kvm_vcpu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) vcpu->cpu = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) /* Clear pending debug event in DBSR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) kvmppc_clear_dbsr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) int kvmppc_core_init_vm(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) return kvm->arch.kvm_ops->init_vm(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) r = vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) vcpu->arch.regs.nip = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) vcpu->arch.shared->pir = vcpu->vcpu_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) kvmppc_set_msr(vcpu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) #ifndef CONFIG_KVM_BOOKE_HV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) vcpu->arch.shadow_pid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) vcpu->arch.shared->msr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) /* Eye-catching numbers so we know if the guest takes an interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) * before it's programmed its own IVPR/IVORs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) vcpu->arch.ivpr = 0x55550000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) vcpu->arch.ivor[i] = 0x7700 | i * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) kvmppc_init_timing_stats(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) r = kvmppc_core_vcpu_setup(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) kvmppc_sanity_check(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) void kvmppc_core_destroy_vm(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) kvm->arch.kvm_ops->destroy_vm(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) int __init kvmppc_booke_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) #ifndef CONFIG_KVM_BOOKE_HV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) unsigned long ivor[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) unsigned long *handler = kvmppc_booke_handler_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) unsigned long max_ivor = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) unsigned long handler_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) /* We install our own exception handlers by hijacking IVPR. IVPR holds only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) * the top 16 bits of the vector base, hence the 64KB-aligned allocation. */
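/*
 * Per the Book E architecture, the handler address for interrupt n is the
 * concatenation of the top half of IVPR with IVORn; as a rough 32-bit
 * illustration:
 *
 *	vector = (IVPR & 0xffff0000) | (IVORn & 0x0000fff0);
 *
 * so each IVOR only selects a 16-byte-aligned offset within the 64KB block
 * allocated below.
 */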
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) VCPU_SIZE_ORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) if (!kvmppc_booke_handlers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) /* XXX make sure our handlers are smaller than Linux's */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) /* Copy our interrupt handlers to match host IVORs. That way we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) * have to swap the IVORs on every guest/host transition. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) ivor[0] = mfspr(SPRN_IVOR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) ivor[1] = mfspr(SPRN_IVOR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) ivor[2] = mfspr(SPRN_IVOR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) ivor[3] = mfspr(SPRN_IVOR3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) ivor[4] = mfspr(SPRN_IVOR4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) ivor[5] = mfspr(SPRN_IVOR5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) ivor[6] = mfspr(SPRN_IVOR6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) ivor[7] = mfspr(SPRN_IVOR7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) ivor[8] = mfspr(SPRN_IVOR8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) ivor[9] = mfspr(SPRN_IVOR9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) ivor[10] = mfspr(SPRN_IVOR10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) ivor[11] = mfspr(SPRN_IVOR11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) ivor[12] = mfspr(SPRN_IVOR12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) ivor[13] = mfspr(SPRN_IVOR13);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) ivor[14] = mfspr(SPRN_IVOR14);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) ivor[15] = mfspr(SPRN_IVOR15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) for (i = 0; i < 16; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) if (ivor[i] > ivor[max_ivor])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) max_ivor = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) handler_len = handler[i + 1] - handler[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) memcpy((void *)kvmppc_booke_handlers + ivor[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) (void *)handler[i], handler_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) handler_len = handler[max_ivor + 1] - handler[max_ivor];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) ivor[max_ivor] + handler_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) #endif /* !BOOKE_HV */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) void __exit kvmppc_booke_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) kvm_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) }