// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

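/*
 * The HV and PR implementations fill in these ops when their modules
 * load; kvm_arch_init_vm() below selects one of them based on the
 * requested VM type.
 */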
struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled; this function hard-disables them itself (the WARN_ON below
 * fires if they are already disabled).
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state (interrupts hard-disabled)
 * <= 0 if we need to go back to the host with return value
 *      (interrupts enabled again)
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
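	/*
	 * On ppc64, local_irq_disable() only soft-disables interrupts
	 * (lazy EE handling); hard_irq_disable() also clears MSR[EE],
	 * so no external interrupt can sneak in between here and the
	 * actual guest entry.
	 */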
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/*
			 * interrupts got enabled in between, so we
			 * are back at square 1
			 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

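/*
 * Handle KVM paravirtual hypercalls: the hypercall number arrives in
 * GPR 11 and its arguments in GPRs 3-6, and a second return value is
 * passed back to the guest in GPR 4 (the KVM PPC paravirt ABI).
 */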
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;

		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

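/*
 * Store @size bytes from @ptr at guest effective address *@eaddr.
 * Returns EMULATE_DONE on success, EMULATE_DO_MMIO if no memslot backs
 * the translated address, or a negative errno if translation fails;
 * on success *@eaddr is updated to the guest real address.
 */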
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r = -EINVAL;

	vcpu->stat.st++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if (!r || r == -EAGAIN)
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;

		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

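/*
 * Load @size bytes into @ptr from guest effective address *@eaddr.
 * Same return convention as kvmppc_st(); additionally fails with
 * -ENOEXEC when an instruction fetch hits a no-execute mapping.
 */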
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc = -EINVAL;

	vcpu->stat.ld++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if (!rc || rc == -EAGAIN)
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;

		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	if (rc)
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_SET_GUEST_DEBUG:
		r = 1;
		break;
	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		r = 1;
		break;
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE:
		/*
		 * We need XIVE to be enabled on the platform (implies
		 * a POWER9 processor) and the PowerNV platform, as
		 * nested is not yet supported.
		 */
		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
			kvmppc_xive_native_supported();
		break;
#endif

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
		       cpu_has_feature(CPU_FTR_HVMODE));
		break;
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_ID;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = hv_enabled && kvmppc_hv_ops->enable_svm &&
			!kvmppc_hv_ops->enable_svm(NULL);
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvmppc_core_free_memslot(kvm, slot);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem, change);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

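/*
 * hrtimer callback for the emulated decrementer: raise a decrementer
 * exception on the vCPU that owns this timer.
 */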
static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	err = kvmppc_subarch_vcpu_init(vcpu);
	if (err)
		return err;

	err = kvmppc_core_vcpu_create(vcpu);
	if (err)
		goto out_vcpu_uninit;

	vcpu->arch.waitp = &vcpu->wait;
	kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
	return 0;

out_vcpu_uninit:
	kvmppc_subarch_vcpu_uninit(vcpu);
	return err;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xics_on_xive())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	case KVMPPC_IRQ_XIVE:
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);

	kvmppc_subarch_vcpu_uninit(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) static inline int kvmppc_get_vsr_dword_offset(int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if ((index != 0) && (index != 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) offset = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) offset = 1 - index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) static inline int kvmppc_get_vsr_word_offset(int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if ((index > 3) || (index < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) offset = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) offset = 3 - index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) return offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
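/*
 * Worked example (editor's addition): the VSX register file is accessed
 * here as an array of host-endian elements, so a guest element index must
 * be mirrored on little-endian hosts.  For the doubleword helper:
 *
 *	kvmppc_get_vsr_dword_offset(0);	// 0 on BE hosts, 1 on LE hosts
 *	kvmppc_get_vsr_dword_offset(2);	// -1: only indices 0 and 1 exist
 *
 * For the word helper the same mirroring gives index 0 -> offset 3 on a
 * little-endian host.
 */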
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) u64 gpr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) union kvmppc_one_reg val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (offset == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (index >= 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) val.vval = VCPU_VSX_VR(vcpu, index - 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) val.vsxval[offset] = gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) VCPU_VSX_VR(vcpu, index - 32) = val.vval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) VCPU_VSX_FPR(vcpu, index, offset) = gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) u64 gpr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) union kvmppc_one_reg val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (index >= 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) val.vval = VCPU_VSX_VR(vcpu, index - 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) val.vsxval[0] = gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) val.vsxval[1] = gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) VCPU_VSX_VR(vcpu, index - 32) = val.vval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) VCPU_VSX_FPR(vcpu, index, 0) = gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) VCPU_VSX_FPR(vcpu, index, 1) = gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) u32 gpr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) union kvmppc_one_reg val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (index >= 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) val.vsx32val[0] = gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) val.vsx32val[1] = gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) val.vsx32val[2] = gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) val.vsx32val[3] = gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) VCPU_VSX_VR(vcpu, index - 32) = val.vval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) val.vsx32val[0] = gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) val.vsx32val[1] = gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) u32 gpr32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) union kvmppc_one_reg val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) int dword_offset, word_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (offset == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (index >= 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) val.vval = VCPU_VSX_VR(vcpu, index - 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) val.vsx32val[offset] = gpr32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) VCPU_VSX_VR(vcpu, index - 32) = val.vval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) dword_offset = offset / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) word_offset = offset % 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) val.vsx32val[word_offset] = gpr32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
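/*
 * Worked example (editor's addition) for the FPR branch just above: a
 * word offset of 2 decomposes into dword_offset = 2 / 2 = 1 and
 * word_offset = 2 % 2 = 0, so doubleword 1 of the register is read back,
 * word 0 inside it is replaced with gpr32, and the doubleword is written
 * back so that the other 32 bits are preserved.
 */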
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) #endif /* CONFIG_VSX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) #ifdef CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) int index, int element_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) int elts = sizeof(vector128)/element_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if ((index < 0) || (index >= elts))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (kvmppc_need_byteswap(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) offset = elts - index - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) offset = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
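/*
 * Worked example (editor's addition): for halfword accesses element_size
 * is 2, so elts = sizeof(vector128) / 2 = 8.  With a cross-endian guest
 * (kvmppc_need_byteswap() true) guest element 0 maps to host element
 * 8 - 0 - 1 = 7; with matching endianness the index is used unchanged.
 */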
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) u64 gpr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) union kvmppc_one_reg val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) int offset = kvmppc_get_vmx_dword_offset(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) vcpu->arch.mmio_vmx_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (offset == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) val.vval = VCPU_VSX_VR(vcpu, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) val.vsxval[offset] = gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) VCPU_VSX_VR(vcpu, index) = val.vval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) u32 gpr32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) union kvmppc_one_reg val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) int offset = kvmppc_get_vmx_word_offset(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) vcpu->arch.mmio_vmx_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (offset == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) val.vval = VCPU_VSX_VR(vcpu, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) val.vsx32val[offset] = gpr32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) VCPU_VSX_VR(vcpu, index) = val.vval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) u16 gpr16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) union kvmppc_one_reg val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) int offset = kvmppc_get_vmx_hword_offset(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) vcpu->arch.mmio_vmx_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (offset == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) val.vval = VCPU_VSX_VR(vcpu, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) val.vsx16val[offset] = gpr16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) VCPU_VSX_VR(vcpu, index) = val.vval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) u8 gpr8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) union kvmppc_one_reg val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) int offset = kvmppc_get_vmx_byte_offset(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) vcpu->arch.mmio_vmx_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (offset == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) val.vval = VCPU_VSX_VR(vcpu, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) val.vsx8val[offset] = gpr8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) VCPU_VSX_VR(vcpu, index) = val.vval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) #endif /* CONFIG_ALTIVEC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) #ifdef CONFIG_PPC_FPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) static inline u64 sp_to_dp(u32 fprs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) u64 fprd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) enable_kernel_fp();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) : "fr0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) return fprd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) static inline u32 dp_to_sp(u64 fprd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) u32 fprs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) enable_kernel_fp();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) : "fr0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return fprs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) #define sp_to_dp(x) (x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) #define dp_to_sp(x) (x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) #endif /* CONFIG_PPC_FPU */
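/*
 * Editor's note: the helpers above convert between single and double
 * precision by bouncing the value through fr0 - lfs expands a 32-bit
 * single into the FPR's internal double format and stfd stores it back
 * out as a 64-bit double (dp_to_sp does the reverse).  fr0 is clobbered,
 * hence the preempt_disable()/enable_kernel_fp() bracket.  A minimal
 * usage sketch:
 *
 *	u32 single = 0x3f800000;	// 1.0f
 *	u64 dbl = sp_to_dp(single);	// 0x3ff0000000000000 == 1.0
 */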
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct kvm_run *run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	u64 gpr = 0;	/* defensive: run->mmio.len is userspace-controlled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (run->mmio.len > sizeof(gpr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (!vcpu->arch.mmio_host_swabbed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) switch (run->mmio.len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) case 8: gpr = *(u64 *)run->mmio.data; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) case 4: gpr = *(u32 *)run->mmio.data; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) case 2: gpr = *(u16 *)run->mmio.data; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) case 1: gpr = *(u8 *)run->mmio.data; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) switch (run->mmio.len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) case 1: gpr = *(u8 *)run->mmio.data; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) /* conversion between single and double precision */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) gpr = sp_to_dp(gpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (vcpu->arch.mmio_sign_extend) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) switch (run->mmio.len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) gpr = (s64)(s32)gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) gpr = (s64)(s16)gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) gpr = (s64)(s8)gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) case KVM_MMIO_REG_GPR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) case KVM_MMIO_REG_FPR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (vcpu->kvm->arch.kvm_ops->giveup_ext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) #ifdef CONFIG_PPC_BOOK3S
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) case KVM_MMIO_REG_QPR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) case KVM_MMIO_REG_FQPR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) #ifdef CONFIG_VSX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) case KVM_MMIO_REG_VSX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (vcpu->kvm->arch.kvm_ops->giveup_ext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) kvmppc_set_vsr_dword(vcpu, gpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) kvmppc_set_vsr_word(vcpu, gpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) else if (vcpu->arch.mmio_copy_type ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) kvmppc_set_vsr_dword_dump(vcpu, gpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) else if (vcpu->arch.mmio_copy_type ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) kvmppc_set_vsr_word_dump(vcpu, gpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) #ifdef CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) case KVM_MMIO_REG_VMX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (vcpu->kvm->arch.kvm_ops->giveup_ext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) kvmppc_set_vmx_dword(vcpu, gpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) kvmppc_set_vmx_word(vcpu, gpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) else if (vcpu->arch.mmio_copy_type ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) KVMPPC_VMX_COPY_HWORD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) kvmppc_set_vmx_hword(vcpu, gpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) else if (vcpu->arch.mmio_copy_type ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) KVMPPC_VMX_COPY_BYTE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) kvmppc_set_vmx_byte(vcpu, gpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) case KVM_MMIO_REG_NESTED_GPR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (kvmppc_need_byteswap(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) gpr = swab64(gpr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) sizeof(gpr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
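/*
 * Worked example (editor's addition): a cross-endian guest issuing a
 * 2-byte sign-extended load of the value 0x8000 arrives here with
 * mmio_host_swabbed set, so swab16() first recovers the guest's numeric
 * value on the host, and the sign-extension step then widens it to
 * 0xffffffffffff8000 before it is written to the target register.
 */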
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) unsigned int rt, unsigned int bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) int is_default_endian, int sign_extend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) struct kvm_run *run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) int idx, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) bool host_swabbed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) /* Pity C doesn't have a logical XOR operator */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (kvmppc_need_byteswap(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) host_swabbed = is_default_endian;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) host_swabbed = !is_default_endian;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	if (bytes > sizeof(run->mmio.data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) run->mmio.phys_addr = vcpu->arch.paddr_accessed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) run->mmio.len = bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) run->mmio.is_write = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) vcpu->arch.io_gpr = rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) vcpu->arch.mmio_host_swabbed = host_swabbed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) vcpu->mmio_needed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) vcpu->mmio_is_write = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) vcpu->arch.mmio_sign_extend = sign_extend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) idx = srcu_read_lock(&vcpu->kvm->srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) bytes, &run->mmio.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) srcu_read_unlock(&vcpu->kvm->srcu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) kvmppc_complete_mmio_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) vcpu->mmio_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return EMULATE_DO_MMIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
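/*
 * Editor's note on the flow above: kvm_io_bus_read() returns 0 only when
 * an in-kernel device model claims the address, in which case the load
 * finishes immediately with EMULATE_DONE.  Otherwise the access
 * parameters stay cached in vcpu->arch and EMULATE_DO_MMIO tells the
 * caller to exit to userspace with KVM_EXIT_MMIO; once userspace has
 * filled run->mmio.data and re-entered, kvmppc_complete_mmio_load()
 * performs the deferred register update.
 */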
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) int kvmppc_handle_load(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) unsigned int rt, unsigned int bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) int is_default_endian)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) EXPORT_SYMBOL_GPL(kvmppc_handle_load);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) /* Same as above, but sign extends */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) unsigned int rt, unsigned int bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) int is_default_endian)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
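/*
 * Example (editor's addition): emulating lha - load halfword algebraic -
 * would use this variant, e.g.
 *
 *	emulated = kvmppc_handle_loads(vcpu, rt, 2, 1);
 *
 * so that the 16-bit value read over MMIO is sign-extended into rt.
 */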
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) #ifdef CONFIG_VSX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) unsigned int rt, unsigned int bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) int is_default_endian, int mmio_sign_extend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) enum emulation_result emulated = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (vcpu->arch.mmio_vsx_copy_nums > 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) while (vcpu->arch.mmio_vsx_copy_nums) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) emulated = __kvmppc_handle_load(vcpu, rt, bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) is_default_endian, mmio_sign_extend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (emulated != EMULATE_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) vcpu->arch.mmio_vsx_copy_nums--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) vcpu->arch.mmio_vsx_offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) return emulated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
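/*
 * Worked example (editor's addition): emulating lxvd2x (load two VSX
 * doublewords) would set mmio_copy_type to KVMPPC_VSX_COPY_DWORD,
 * mmio_vsx_copy_nums to 2 and mmio_vsx_offset to 0; the loop above then
 * issues two 8-byte MMIO reads, advancing paddr_accessed and the element
 * offset after each completed one.  An element that must go out to
 * userspace leaves the loop early with EMULATE_DO_MMIO and the remaining
 * count still in mmio_vsx_copy_nums.
 */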
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) #endif /* CONFIG_VSX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) int kvmppc_handle_store(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) u64 val, unsigned int bytes, int is_default_endian)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) struct kvm_run *run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) void *data = run->mmio.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) int idx, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) bool host_swabbed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) /* Pity C doesn't have a logical XOR operator */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (kvmppc_need_byteswap(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) host_swabbed = is_default_endian;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) host_swabbed = !is_default_endian;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	if (bytes > sizeof(run->mmio.data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) run->mmio.phys_addr = vcpu->arch.paddr_accessed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) run->mmio.len = bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) run->mmio.is_write = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) vcpu->mmio_needed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) vcpu->mmio_is_write = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) val = dp_to_sp(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	/* Store the value in the lowest bytes of 'data'. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (!host_swabbed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) switch (bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) case 8: *(u64 *)data = val; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) case 4: *(u32 *)data = val; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) case 2: *(u16 *)data = val; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) case 1: *(u8 *)data = val; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) switch (bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) case 8: *(u64 *)data = swab64(val); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) case 4: *(u32 *)data = swab32(val); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) case 2: *(u16 *)data = swab16(val); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) case 1: *(u8 *)data = val; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) idx = srcu_read_lock(&vcpu->kvm->srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) bytes, &run->mmio.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) srcu_read_unlock(&vcpu->kvm->srcu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) vcpu->mmio_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) return EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) return EMULATE_DO_MMIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) EXPORT_SYMBOL_GPL(kvmppc_handle_store);
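/*
 * Worked example (editor's addition): a big-endian guest on a
 * little-endian host storing the word 0x12345678 takes the host_swabbed
 * branch: swab32() turns the value into 0x78563412, which the host lays
 * out in run->mmio.data as the bytes 12 34 56 78 - exactly the guest's
 * memory image of the word.
 */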
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) #ifdef CONFIG_VSX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) u32 dword_offset, word_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) union kvmppc_one_reg reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) int vsx_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) int copy_type = vcpu->arch.mmio_copy_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) switch (copy_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) case KVMPPC_VSX_COPY_DWORD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) vsx_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (vsx_offset == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) result = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (rs < 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) *val = reg.vsxval[vsx_offset];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) case KVMPPC_VSX_COPY_WORD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) vsx_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) if (vsx_offset == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) result = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (rs < 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) dword_offset = vsx_offset / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) word_offset = vsx_offset % 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) *val = reg.vsx32val[word_offset];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) *val = reg.vsx32val[vsx_offset];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) result = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
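/*
 * Worked example (editor's addition) for KVMPPC_VSX_COPY_WORD with
 * rs < 32: once the endian-corrected vsx_offset is known, e.g. 3, it
 * decomposes into dword_offset = 3 / 2 = 1 and word_offset = 3 % 2 = 1,
 * so the value is taken from word 1 of doubleword 1 of the FPR.
 */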
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) int rs, unsigned int bytes, int is_default_endian)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) enum emulation_result emulated = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) vcpu->arch.io_gpr = rs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (vcpu->arch.mmio_vsx_copy_nums > 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) while (vcpu->arch.mmio_vsx_copy_nums) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) emulated = kvmppc_handle_store(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) val, bytes, is_default_endian);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (emulated != EMULATE_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) vcpu->arch.mmio_vsx_copy_nums--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) vcpu->arch.mmio_vsx_offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) return emulated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) struct kvm_run *run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) enum emulation_result emulated = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) vcpu->arch.paddr_accessed += run->mmio.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if (!vcpu->mmio_is_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) emulated = kvmppc_handle_vsx_store(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) vcpu->arch.io_gpr, run->mmio.len, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) switch (emulated) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) case EMULATE_DO_MMIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) run->exit_reason = KVM_EXIT_MMIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) r = RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) case EMULATE_FAIL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) r = RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
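/*
 * Editor's note: the helper above is the resume path for multi-element
 * VSX accesses.  It runs after userspace has completed one element,
 * advances paddr_accessed past that element and re-enters the load or
 * store loop for whatever remains in mmio_vsx_copy_nums.
 */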
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) #endif /* CONFIG_VSX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) #ifdef CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) unsigned int rt, unsigned int bytes, int is_default_endian)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) enum emulation_result emulated = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
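	/* Currently, mmio_vmx_copy_nums is only allowed to be 2 or less */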
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (vcpu->arch.mmio_vmx_copy_nums > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) while (vcpu->arch.mmio_vmx_copy_nums) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) emulated = __kvmppc_handle_load(vcpu, rt, bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) is_default_endian, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (emulated != EMULATE_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) vcpu->arch.mmio_vmx_copy_nums--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) vcpu->arch.mmio_vmx_offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) return emulated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) union kvmppc_one_reg reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) int vmx_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) vmx_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (vmx_offset == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) reg.vval = VCPU_VSX_VR(vcpu, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) *val = reg.vsxval[vmx_offset];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) union kvmppc_one_reg reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) int vmx_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) vmx_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (vmx_offset == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) reg.vval = VCPU_VSX_VR(vcpu, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) *val = reg.vsx32val[vmx_offset];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) union kvmppc_one_reg reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) int vmx_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) vmx_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (vmx_offset == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) reg.vval = VCPU_VSX_VR(vcpu, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) *val = reg.vsx16val[vmx_offset];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) union kvmppc_one_reg reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) int vmx_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) vmx_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (vmx_offset == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) reg.vval = VCPU_VSX_VR(vcpu, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) *val = reg.vsx8val[vmx_offset];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) unsigned int rs, unsigned int bytes, int is_default_endian)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) u64 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) unsigned int index = rs & KVM_MMIO_REG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) enum emulation_result emulated = EMULATE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
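	/* Currently, mmio_vmx_copy_nums is only allowed to be 2 or less */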
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (vcpu->arch.mmio_vmx_copy_nums > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) vcpu->arch.io_gpr = rs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) while (vcpu->arch.mmio_vmx_copy_nums) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) switch (vcpu->arch.mmio_copy_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) case KVMPPC_VMX_COPY_DWORD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) case KVMPPC_VMX_COPY_WORD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) case KVMPPC_VMX_COPY_HWORD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) case KVMPPC_VMX_COPY_BYTE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) return EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) emulated = kvmppc_handle_store(vcpu, val, bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) is_default_endian);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (emulated != EMULATE_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) vcpu->arch.mmio_vmx_copy_nums--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) vcpu->arch.mmio_vmx_offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) return emulated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
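/*
 * Worked example (editor's addition): emulating stvx would set
 * mmio_copy_type to KVMPPC_VMX_COPY_DWORD and mmio_vmx_copy_nums to 2;
 * each pass of the loop above then extracts one endian-corrected
 * doubleword from VR (rs & KVM_MMIO_REG_MASK) and hands it to
 * kvmppc_handle_store() as an 8-byte MMIO write.
 */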
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) struct kvm_run *run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) enum emulation_result emulated = EMULATE_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) vcpu->arch.paddr_accessed += run->mmio.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if (!vcpu->mmio_is_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) emulated = kvmppc_handle_vmx_load(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) vcpu->arch.io_gpr, run->mmio.len, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) emulated = kvmppc_handle_vmx_store(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) vcpu->arch.io_gpr, run->mmio.len, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) switch (emulated) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) case EMULATE_DO_MMIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) run->exit_reason = KVM_EXIT_MMIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) r = RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) case EMULATE_FAIL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) r = RESUME_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) r = RESUME_GUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) #endif /* CONFIG_ALTIVEC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
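/*
 * Handler for the KVM_GET_ONE_REG vcpu ioctl.  Register IDs that the
 * HV/PR backend does not handle fall through to the generic AltiVec
 * cases below.  Illustrative userspace usage (not kernel code; vcpu_fd
 * is assumed to be an open vcpu file descriptor):
 *
 *	__u32 vrsave;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_VRSAVE,
 *		.addr = (__u64)(unsigned long)&vrsave,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */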
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) union kvmppc_one_reg val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) size = one_reg_size(reg->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) if (size > sizeof(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) r = kvmppc_get_one_reg(vcpu, reg->id, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) if (r == -EINVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) switch (reg->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) #ifdef CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) r = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) case KVM_REG_PPC_VSCR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) r = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) case KVM_REG_PPC_VRSAVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) val = get_reg_val(reg->id, vcpu->arch.vrsave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) #endif /* CONFIG_ALTIVEC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) union kvmppc_one_reg val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) size = one_reg_size(reg->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) if (size > sizeof(val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) r = kvmppc_set_one_reg(vcpu, reg->id, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) if (r == -EINVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) switch (reg->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) #ifdef CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) r = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) case KVM_REG_PPC_VSCR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) r = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) case KVM_REG_PPC_VRSAVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) r = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) vcpu->arch.vrsave = set_reg_val(reg->id, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) #endif /* CONFIG_ALTIVEC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
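/*
 * Main entry point for KVM_RUN.  Before (re)entering the guest, finish
 * whatever caused the previous exit to userspace: write back a
 * completed MMIO load, continue a multi-slice VSX/VMX access, or copy
 * OSI / PAPR-hcall / EPR results back into the guest registers.
 */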
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) struct kvm_run *run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) if (vcpu->mmio_needed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) vcpu->mmio_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (!vcpu->mmio_is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) kvmppc_complete_mmio_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) #ifdef CONFIG_VSX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) if (vcpu->arch.mmio_vsx_copy_nums > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) vcpu->arch.mmio_vsx_copy_nums--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) vcpu->arch.mmio_vsx_offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) if (vcpu->arch.mmio_vsx_copy_nums > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) if (r == RESUME_HOST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) vcpu->mmio_needed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) #ifdef CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) if (vcpu->arch.mmio_vmx_copy_nums > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) vcpu->arch.mmio_vmx_copy_nums--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) vcpu->arch.mmio_vmx_offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (vcpu->arch.mmio_vmx_copy_nums > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (r == RESUME_HOST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) vcpu->mmio_needed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) } else if (vcpu->arch.osi_needed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) u64 *gprs = run->osi.gprs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) for (i = 0; i < 32; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) kvmppc_set_gpr(vcpu, i, gprs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) vcpu->arch.osi_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) } else if (vcpu->arch.hcall_needed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) for (i = 0; i < 9; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) vcpu->arch.hcall_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) #ifdef CONFIG_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) } else if (vcpu->arch.epr_needed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) kvmppc_set_epr(vcpu, run->epr.epr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) vcpu->arch.epr_needed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) kvm_sigset_activate(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) if (run->immediate_exit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) r = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) r = kvmppc_vcpu_run(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) kvm_sigset_deactivate(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) #ifdef CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
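/*
 * Assert or (with KVM_INTERRUPT_UNSET) retract the vcpu's external
 * interrupt line; the kick ensures the vcpu re-evaluates its pending
 * exceptions promptly.
 */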
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) if (irq->irq == KVM_INTERRUPT_UNSET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) kvmppc_core_dequeue_external(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) kvmppc_core_queue_external(vcpu, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) kvm_vcpu_kick(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
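/*
 * Handler for the KVM_ENABLE_CAP vcpu ioctl.  Illustrative userspace
 * usage (not kernel code) enabling PAPR mode on a vcpu:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_PAPR };
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 *
 * cap->flags must be zero; the meaning of cap->args[] depends on the
 * capability being enabled.
 */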
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) struct kvm_enable_cap *cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) if (cap->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) switch (cap->cap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) case KVM_CAP_PPC_OSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) vcpu->arch.osi_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) case KVM_CAP_PPC_PAPR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) vcpu->arch.papr_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) case KVM_CAP_PPC_EPR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (cap->args[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) #ifdef CONFIG_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) case KVM_CAP_PPC_BOOKE_WATCHDOG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) vcpu->arch.watchdog_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) case KVM_CAP_SW_TLB: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) struct kvm_config_tlb cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) #ifdef CONFIG_KVM_MPIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) case KVM_CAP_IRQ_MPIC: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) struct fd f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) struct kvm_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) r = -EBADF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) f = fdget(cap->args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) if (!f.file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) r = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) dev = kvm_device_from_filp(f.file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) if (dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) #ifdef CONFIG_KVM_XICS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) case KVM_CAP_IRQ_XICS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) struct fd f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) struct kvm_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) r = -EBADF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) f = fdget(cap->args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) if (!f.file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) r = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) dev = kvm_device_from_filp(f.file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) if (xics_on_xive())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) #endif /* CONFIG_KVM_XICS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) #ifdef CONFIG_KVM_XIVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) case KVM_CAP_PPC_IRQ_XIVE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) struct fd f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) struct kvm_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) r = -EBADF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) f = fdget(cap->args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (!f.file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) r = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) if (!xive_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) r = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) dev = kvm_device_from_filp(f.file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) if (dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) cap->args[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) #endif /* CONFIG_KVM_XIVE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) case KVM_CAP_PPC_FWNMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) if (!is_kvmppc_hv_enabled(vcpu->kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) vcpu->kvm->arch.fwnmi_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) if (!r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) r = kvmppc_sanity_check(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) bool kvm_arch_intc_initialized(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) #ifdef CONFIG_KVM_MPIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) if (kvm->arch.mpic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) #ifdef CONFIG_KVM_XICS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) if (kvm->arch.xics || kvm->arch.xive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) struct kvm_mp_state *mp_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) struct kvm_mp_state *mp_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) long kvm_arch_vcpu_async_ioctl(struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) unsigned int ioctl, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) struct kvm_vcpu *vcpu = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) void __user *argp = (void __user *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if (ioctl == KVM_INTERRUPT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) struct kvm_interrupt irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) if (copy_from_user(&irq, argp, sizeof(irq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) return -ENOIOCTLCMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) long kvm_arch_vcpu_ioctl(struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) unsigned int ioctl, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) struct kvm_vcpu *vcpu = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) void __user *argp = (void __user *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) long r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) switch (ioctl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) case KVM_ENABLE_CAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) struct kvm_enable_cap cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) if (copy_from_user(&cap, argp, sizeof(cap)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) case KVM_SET_ONE_REG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) case KVM_GET_ONE_REG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) struct kvm_one_reg reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) case KVM_DIRTY_TLB: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) struct kvm_dirty_tlb dirty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) if (copy_from_user(&dirty, argp, sizeof(dirty)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) return VM_FAULT_SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
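/*
 * Report, via KVM_PPC_GET_PVINFO, the instruction sequence a guest
 * should patch in at hypercall sites.
 */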
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) {
	u32 inst_nop = 0x60000000;	/* nop (ori r0,r0,0) */
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;	/* sc 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) pvinfo->hcall[1] = cpu_to_be32(inst_nop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) pvinfo->hcall[2] = cpu_to_be32(inst_nop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) pvinfo->hcall[3] = cpu_to_be32(inst_nop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) #else
	u32 inst_lis = 0x3c000000;	/* lis r0, 0 */
	u32 inst_ori = 0x60000000;	/* ori r0, r0, 0 */
	u32 inst_sc = 0x44000002;	/* sc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) u32 inst_imm_mask = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) * The hypercall to get into KVM from within guest context is as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) * follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) * sc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) * nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) pvinfo->hcall[2] = cpu_to_be32(inst_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) pvinfo->hcall[3] = cpu_to_be32(inst_nop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) bool line_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) if (!irqchip_in_kernel(kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) irq_event->irq, irq_event->level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) line_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) struct kvm_enable_cap *cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) if (cap->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) switch (cap->cap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) case KVM_CAP_PPC_ENABLE_HCALL: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) unsigned long hcall = cap->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
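		/*
		 * PAPR hcall numbers are multiples of 4, so the
		 * enabled_hcalls bitmap is indexed by hcall / 4 and
		 * any opcode with (hcall & 3) set is rejected.
		 */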
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) cap->args[1] > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) if (cap->args[1])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) set_bit(hcall / 4, kvm->arch.enabled_hcalls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) case KVM_CAP_PPC_SMT: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) unsigned long mode = cap->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) unsigned long flags = cap->args[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) if (kvm->arch.kvm_ops->set_smt_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) case KVM_CAP_PPC_NESTED_HV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) if (!is_kvmppc_hv_enabled(kvm) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) !kvm->arch.kvm_ops->enable_nested)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) r = kvm->arch.kvm_ops->enable_nested(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) case KVM_CAP_PPC_SECURE_GUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) r = kvm->arch.kvm_ops->enable_svm(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) #ifdef CONFIG_PPC_BOOK3S_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) * These functions check whether the underlying hardware is safe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) * against attacks based on observing the effects of speculatively
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) * executed instructions, and whether it supplies instructions for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) * use in workarounds. The information comes from firmware, either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) * via the device tree on powernv platforms or from an hcall on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) * pseries platforms.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) #ifdef CONFIG_PPC_PSERIES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) struct h_cpu_char_result c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) unsigned long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) if (!machine_is(pseries))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) rc = plpar_get_cpu_characteristics(&c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) if (rc == H_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) cp->character = c.character;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) cp->behaviour = c.behaviour;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) static inline bool have_fw_feat(struct device_node *fw_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) const char *state, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) bool r = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) np = of_get_child_by_name(fw_features, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) if (np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) r = of_property_read_bool(np, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) struct device_node *np, *fw_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) memset(cp, 0, sizeof(*cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) r = pseries_get_cpu_char(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) if (r != -ENOTTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) np = of_find_node_by_name(NULL, "ibm,opal");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) if (np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) fw_features = of_get_child_by_name(np, "fw-features");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) if (!fw_features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) if (have_fw_feat(fw_features, "enabled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) "inst-spec-barrier-ori31,31,0"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) if (have_fw_feat(fw_features, "enabled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) "fw-bcctrl-serialized"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) if (have_fw_feat(fw_features, "enabled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) "inst-l1d-flush-ori30,30,0"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) if (have_fw_feat(fw_features, "enabled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) "inst-l1d-flush-trig2"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) if (have_fw_feat(fw_features, "enabled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) "fw-l1d-thread-split"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) if (have_fw_feat(fw_features, "enabled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) "fw-count-cache-disabled"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) if (have_fw_feat(fw_features, "enabled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) "fw-count-cache-flush-bcctr2,0,0"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) if (have_fw_feat(fw_features, "enabled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) "speculation-policy-favor-security"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) if (!have_fw_feat(fw_features, "disabled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) "needs-l1d-flush-msr-pr-0-to-1"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) if (!have_fw_feat(fw_features, "disabled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) "needs-spec-barrier-for-bound-checks"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) if (have_fw_feat(fw_features, "enabled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) "needs-count-cache-flush-on-context-switch"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) of_node_put(fw_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) long kvm_arch_vm_ioctl(struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) unsigned int ioctl, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) struct kvm *kvm __maybe_unused = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) void __user *argp = (void __user *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) long r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) switch (ioctl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) case KVM_PPC_GET_PVINFO: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) struct kvm_ppc_pvinfo pvinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) memset(&pvinfo, 0, sizeof(pvinfo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) #ifdef CONFIG_SPAPR_TCE_IOMMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) case KVM_CREATE_SPAPR_TCE_64: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) struct kvm_create_spapr_tce_64 create_tce_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) if (create_tce_64.flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) case KVM_CREATE_SPAPR_TCE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) struct kvm_create_spapr_tce create_tce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) struct kvm_create_spapr_tce_64 create_tce_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) create_tce_64.liobn = create_tce.liobn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) create_tce_64.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) create_tce_64.size = create_tce.window_size >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) IOMMU_PAGE_SHIFT_4K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) create_tce_64.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) #ifdef CONFIG_PPC_BOOK3S_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) case KVM_PPC_GET_SMMU_INFO: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) struct kvm_ppc_smmu_info info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) struct kvm *kvm = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) memset(&info, 0, sizeof(info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) case KVM_PPC_RTAS_DEFINE_TOKEN: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) struct kvm *kvm = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) case KVM_PPC_CONFIGURE_V3_MMU: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) struct kvm *kvm = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) struct kvm_ppc_mmuv3_cfg cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) if (!kvm->arch.kvm_ops->configure_mmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) if (copy_from_user(&cfg, argp, sizeof(cfg)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) case KVM_PPC_GET_RMMU_INFO: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) struct kvm *kvm = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) struct kvm_ppc_rmmu_info info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) if (!kvm->arch.kvm_ops->get_rmmu_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) case KVM_PPC_GET_CPU_CHAR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) struct kvm_ppc_cpu_char cpuchar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) r = kvmppc_get_cpu_char(&cpuchar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) case KVM_PPC_SVM_OFF: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) struct kvm *kvm = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) if (!kvm->arch.kvm_ops->svm_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) r = kvm->arch.kvm_ops->svm_off(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) default: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) struct kvm *kvm = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) #else /* CONFIG_PPC_BOOK3S_64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) r = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450)
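/*
 * Simple bitmap allocator for logical partition IDs.  The
 * test_and_set_bit() retry loop makes kvmppc_alloc_lpid() safe against
 * concurrent callers without taking a lock.
 */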
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) static unsigned long nr_lpids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) long kvmppc_alloc_lpid(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) long lpid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) if (lpid >= nr_lpids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) pr_err("%s: No LPIDs free\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) } while (test_and_set_bit(lpid, lpid_inuse));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) return lpid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) void kvmppc_claim_lpid(long lpid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) set_bit(lpid, lpid_inuse);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) void kvmppc_free_lpid(long lpid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) clear_bit(lpid, lpid_inuse);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) void kvmppc_init_lpid(unsigned long nr_lpids_param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) memset(lpid_inuse, 0, sizeof(lpid_inuse));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) int kvm_arch_init(void *opaque)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);