// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/xive.h>

#include "book3s.h"
#include "trace.h"

/* #define EXIT_DEBUG */

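/*
 * Per-VCPU and per-VM statistics exported through the KVM debugfs
 * interface for Book3S guests.
 */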
struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("exits", sum_exits),
	VCPU_STAT("mmio", mmio_exits),
	VCPU_STAT("sig", signal_exits),
	VCPU_STAT("sysc", syscall_exits),
	VCPU_STAT("inst_emu", emulated_inst_exits),
	VCPU_STAT("dec", dec_exits),
	VCPU_STAT("ext_intr", ext_intr_exits),
	VCPU_STAT("queue_intr", queue_intr),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	VCPU_STAT("halt_wait_ns", halt_wait_ns),
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_successful_wait", halt_successful_wait),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("pf_storage", pf_storage),
	VCPU_STAT("sp_storage", sp_storage),
	VCPU_STAT("pf_instruc", pf_instruc),
	VCPU_STAT("sp_instruc", sp_instruc),
	VCPU_STAT("ld", ld),
	VCPU_STAT("ld_slow", ld_slow),
	VCPU_STAT("st", st),
	VCPU_STAT("st_slow", st_slow),
	VCPU_STAT("pthru_all", pthru_all),
	VCPU_STAT("pthru_host", pthru_host),
	VCPU_STAT("pthru_bad_aff", pthru_bad_aff),
	VM_STAT("largepages_2M", num_2M_pages, .mode = 0444),
	VM_STAT("largepages_1G", num_1G_pages, .mode = 0444),
	{ NULL }
};

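/*
 * For PR KVM, mirror the pending-exception state into the int_pending
 * field of the shared (magic) page so a paravirtualized guest can see
 * whether an interrupt is pending.  HV guests receive real interrupts
 * and need no such hint.
 */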
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

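/*
 * Per the PPC KVM paravirt interface, a PR guest marks a critical
 * section by putting r1 into the shared page's "critical" field; while
 * that field matches r1 and the guest is in supervisor mode, interrupt
 * delivery is deferred.
 */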
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

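/*
 * Immediate injection: dispatch to the PR or HV backend, which vectors
 * the guest into the requested exception.
 */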
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->kvm->arch.kvm_ops->inject_interrupt(vcpu, vec, flags);
}

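/*
 * Map an architected exception vector offset (e.g. 0x300 for data
 * storage) to the internal delivery priority used as a bit number in
 * vcpu->arch.pending_exceptions.
 */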
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE; break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT; break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER; break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL; break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL; break;
	default:    prio = BOOK3S_IRQPRIO_MAX; break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
}

void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
}

void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	/*
	 * This case (KVM_INTERRUPT_SET) should never actually arise for
	 * a pseries guest (because pseries guests expect their interrupt
	 * controllers to continue asserting an external interrupt request
	 * until it is acknowledged at the interrupt controller), but is
	 * included to avoid ABI breakage and potentially for other
	 * sorts of guest.
	 *
	 * There is a subtlety here: HV KVM does not test the
	 * external_oneshot flag in the code that synthesizes
	 * external interrupts for the guest just before entering
	 * the guest.  That is OK even if userspace did do a
	 * KVM_INTERRUPT_SET on a pseries guest vcpu, because the
	 * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick()
	 * which ends up doing a smp_send_reschedule(), which will
	 * pull the guest all the way out to the host, meaning that
	 * we will call kvmppc_core_prepare_to_enter() before entering
	 * the guest again, and that will handle the external_oneshot
	 * flag correctly.
	 */
	if (irq->irq == KVM_INTERRUPT_SET)
		vcpu->arch.external_oneshot = 1;

	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);

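/*
 * Try to deliver one pending exception.  Decrementer and external
 * interrupts are gated on MSR_EE and on the guest not being inside a
 * shared-page critical section; the remaining exceptions are delivered
 * unconditionally.  Returns non-zero if the exception was injected.
 */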
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL:
		/*
		 * External interrupts get cleared by userspace
		 * except when set by the KVM_INTERRUPT ioctl with
		 * KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL).
		 */
		if (vcpu->arch.external_oneshot) {
			vcpu->arch.external_oneshot = 0;
			return true;
		}
		return false;
	}

	return true;
}

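/*
 * Called before (re)entering the guest: walk the pending_exceptions
 * bitmap in priority order (lowest bit number first), inject what the
 * guest can currently take, drop one-shot sources from the bitmap, and
 * refresh the shared-page int_pending hint.
 */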
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

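/*
 * Translate a guest physical address to a host pfn.  The magic (shared)
 * page is backed by kernel memory rather than a memslot, so it is
 * special-cased; everything else is resolved through the memslots with
 * gfn_to_pfn_prot().
 */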
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			    bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);

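/*
 * Translate a guest effective address.  If the relevant relocation bit
 * (MSR_IR for instructions, MSR_DR for data) is set, the guest MMU model
 * does the lookup; in real mode the address maps 1:1 (clipped to
 * KVM_PAM) with full read/write/execute permissions.
 */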
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

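/*
 * Fetch the instruction that caused the last guest exit.  For a system
 * call the saved PC already points past the sc instruction, so back up
 * four bytes before reading it.
 */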
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
			  enum instruction_fetch_type type, u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

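/*
 * KVM_GET_REGS/KVM_SET_REGS: copy the guest-visible general purpose and
 * special purpose registers between the vcpu and the kvm_regs structure.
 */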
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

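/*
 * KVM_GET_ONE_REG: let the HV or PR backend handle the register first
 * and fall back to the common Book3S registers here when it returns
 * -EINVAL.
 */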
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				*val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
			else
				*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_get_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

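/*
 * KVM_SET_ONE_REG: the mirror image of the above, again deferring to the
 * HV/PR backend before handling the common Book3S registers.
 */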
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
			else
				r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_set_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

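/*
 * Most of the remaining entry points are thin wrappers that dispatch to
 * the PR or HV implementation through kvm->arch.kvm_ops.
 */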
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) EXPORT_SYMBOL_GPL(kvmppc_set_msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) struct kvm_translation *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct kvm_guest_debug *dbg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) vcpu->guest_debug = dbg->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) kvmppc_core_queue_dec(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) kvm_vcpu_kick(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) return vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) kvm->arch.kvm_ops->free_memslot(slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) int kvmppc_core_prepare_memory_region(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) struct kvm_memory_slot *memslot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) const struct kvm_userspace_memory_region *mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) enum kvm_mr_change change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) void kvmppc_core_commit_memory_region(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) const struct kvm_userspace_memory_region *mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) const struct kvm_memory_slot *old,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) const struct kvm_memory_slot *new,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) enum kvm_mr_change change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
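/*
 * MMU notifier hooks: the backend owns the guest mappings, so these
 * simply forward to the corresponding kvm_ops callbacks.
 */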
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) unsigned flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) return kvm->arch.kvm_ops->age_hva(kvm, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
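/*
 * VM-wide initialization: set up the Book3S-common lists (TCE tables,
 * RTAS tokens) before handing over to the backend's init_vm.
 */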
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) int kvmppc_core_init_vm(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) mutex_init(&kvm->arch.rtas_token_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return kvm->arch.kvm_ops->init_vm(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) void kvmppc_core_destroy_vm(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) kvm->arch.kvm_ops->destroy_vm(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) kvmppc_rtas_tokens_free(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) #ifdef CONFIG_KVM_XICS
	/*
	 * Free the XIVE and XICS devices, which are not directly freed
	 * by the device 'release' method.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) kfree(kvm->arch.xive_devices.native);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) kvm->arch.xive_devices.native = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) kfree(kvm->arch.xive_devices.xics_on_xive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) kvm->arch.xive_devices.xics_on_xive = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) kfree(kvm->arch.xics_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) kvm->arch.xics_device = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) #endif /* CONFIG_KVM_XICS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
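/*
 * H_LOGICAL_CI_LOAD: emulate a cache-inhibited load for the guest.
 * r4 holds the access size (1, 2, 4 or 8 bytes) and r5 the address;
 * the value read from the in-kernel MMIO bus is converted from
 * big-endian and returned in r4.  Anything we cannot handle returns
 * H_TOO_HARD so the hypercall can be completed elsewhere (e.g. in
 * userspace).
 */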
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) unsigned long size = kvmppc_get_gpr(vcpu, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) unsigned long addr = kvmppc_get_gpr(vcpu, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) u64 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) int srcu_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (!is_power_of_2(size) || (size > sizeof(buf)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return H_TOO_HARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) return H_TOO_HARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) switch (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return H_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
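/*
 * H_LOGICAL_CI_STORE: emulate a cache-inhibited store for the guest.
 * r4 holds the access size, r5 the address and r6 the value, which is
 * converted to big-endian and written through the in-kernel MMIO bus;
 * unsupported sizes or a failed write return H_TOO_HARD.
 */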
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) unsigned long size = kvmppc_get_gpr(vcpu, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) unsigned long addr = kvmppc_get_gpr(vcpu, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) unsigned long val = kvmppc_get_gpr(vcpu, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) u64 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) int srcu_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) switch (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) *(u8 *)&buf = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) *(__be16 *)&buf = cpu_to_be16(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) *(__be32 *)&buf = cpu_to_be32(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) *(__be64 *)&buf = cpu_to_be64(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return H_TOO_HARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return H_TOO_HARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) return H_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) int kvmppc_core_check_processor_compat(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) {
	/*
	 * Always return 0 for Book3S: processor compatibility is
	 * checked when the HV or PR module is loaded.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
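/* Ask the backend whether it implements the given hypercall in-kernel. */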
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return kvm->arch.kvm_ops->hcall_implemented(hcall);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) #ifdef CONFIG_KVM_XICS
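/*
 * Inject an interrupt into the in-kernel irqchip: route it to the
 * XIVE-backed implementation when the host uses XIVE, otherwise to the
 * software-emulated XICS.
 */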
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) bool line_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (xics_on_xive())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) line_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) line_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) struct kvm *kvm, int irq_source_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) int level, bool line_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) level, line_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
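
/* Routing-entry callback installed by kvm_irq_map_gsi() below. */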
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) struct kvm *kvm, int irq_source_id, int level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) bool line_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) int kvm_irq_map_gsi(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct kvm_kernel_irq_routing_entry *entries, int gsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) entries->gsi = gsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) entries->type = KVM_IRQ_ROUTING_IRQCHIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) entries->set = kvmppc_book3s_set_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) entries->irqchip.irqchip = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) entries->irqchip.pin = gsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
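/* With a single flat irqchip, pin numbers map 1:1 onto GSIs. */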
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) return pin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) #endif /* CONFIG_KVM_XICS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
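/*
 * Module init: bring up the common KVM core, initialize the PR backend
 * on 32-bit builds (where it is part of this module), and register the
 * in-kernel irqchip device types.
 */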
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) static int kvmppc_book3s_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) r = kvmppc_book3s_init_pr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) #ifdef CONFIG_KVM_XICS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) #ifdef CONFIG_KVM_XIVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (xics_on_xive()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) kvmppc_xive_init_module();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (kvmppc_xive_native_supported()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) kvmppc_xive_native_init_module();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) kvm_register_device_ops(&kvm_xive_native_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) KVM_DEV_TYPE_XIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
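/* Module exit: tear everything down in the reverse order of init. */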
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) static void kvmppc_book3s_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) #ifdef CONFIG_KVM_XICS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (xics_on_xive()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) kvmppc_xive_exit_module();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) kvmppc_xive_native_exit_module();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) kvmppc_book3s_exit_pr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) kvm_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) module_init(kvmppc_book3s_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) module_exit(kvmppc_book3s_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
/* On 32-bit, this is our one and only kernel module. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) MODULE_ALIAS_MISCDEV(KVM_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) MODULE_ALIAS("devname:kvm");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) #endif