// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("userspace_handled", exit_userspace),
	VCPU_STAT("exit_null", exit_null),
	VCPU_STAT("exit_validity", exit_validity),
	VCPU_STAT("exit_stop_request", exit_stop_request),
	VCPU_STAT("exit_external_request", exit_external_request),
	VCPU_STAT("exit_io_request", exit_io_request),
	VCPU_STAT("exit_external_interrupt", exit_external_interrupt),
	VCPU_STAT("exit_instruction", exit_instruction),
	VCPU_STAT("exit_pei", exit_pei),
	VCPU_STAT("exit_program_interruption", exit_program_interruption),
	VCPU_STAT("exit_instr_and_program_int", exit_instr_and_program),
	VCPU_STAT("exit_operation_exception", exit_operation_exception),
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_no_poll_steal", halt_no_poll_steal),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	VCPU_STAT("instruction_lctlg", instruction_lctlg),
	VCPU_STAT("instruction_lctl", instruction_lctl),
	VCPU_STAT("instruction_stctl", instruction_stctl),
	VCPU_STAT("instruction_stctg", instruction_stctg),
	VCPU_STAT("deliver_ckc", deliver_ckc),
	VCPU_STAT("deliver_cputm", deliver_cputm),
	VCPU_STAT("deliver_emergency_signal", deliver_emergency_signal),
	VCPU_STAT("deliver_external_call", deliver_external_call),
	VCPU_STAT("deliver_service_signal", deliver_service_signal),
	VCPU_STAT("deliver_virtio", deliver_virtio),
	VCPU_STAT("deliver_stop_signal", deliver_stop_signal),
	VCPU_STAT("deliver_prefix_signal", deliver_prefix_signal),
	VCPU_STAT("deliver_restart_signal", deliver_restart_signal),
	VCPU_STAT("deliver_program", deliver_program),
	VCPU_STAT("deliver_io", deliver_io),
	VCPU_STAT("deliver_machine_check", deliver_machine_check),
	VCPU_STAT("exit_wait_state", exit_wait_state),
	VCPU_STAT("inject_ckc", inject_ckc),
	VCPU_STAT("inject_cputm", inject_cputm),
	VCPU_STAT("inject_external_call", inject_external_call),
	VM_STAT("inject_float_mchk", inject_float_mchk),
	VCPU_STAT("inject_emergency_signal", inject_emergency_signal),
	VM_STAT("inject_io", inject_io),
	VCPU_STAT("inject_mchk", inject_mchk),
	VM_STAT("inject_pfault_done", inject_pfault_done),
	VCPU_STAT("inject_program", inject_program),
	VCPU_STAT("inject_restart", inject_restart),
	VM_STAT("inject_service_signal", inject_service_signal),
	VCPU_STAT("inject_set_prefix", inject_set_prefix),
	VCPU_STAT("inject_stop_signal", inject_stop_signal),
	VCPU_STAT("inject_pfault_init", inject_pfault_init),
	VM_STAT("inject_virtio", inject_virtio),
	VCPU_STAT("instruction_epsw", instruction_epsw),
	VCPU_STAT("instruction_gs", instruction_gs),
	VCPU_STAT("instruction_io_other", instruction_io_other),
	VCPU_STAT("instruction_lpsw", instruction_lpsw),
	VCPU_STAT("instruction_lpswe", instruction_lpswe),
	VCPU_STAT("instruction_pfmf", instruction_pfmf),
	VCPU_STAT("instruction_ptff", instruction_ptff),
	VCPU_STAT("instruction_stidp", instruction_stidp),
	VCPU_STAT("instruction_sck", instruction_sck),
	VCPU_STAT("instruction_sckpf", instruction_sckpf),
	VCPU_STAT("instruction_spx", instruction_spx),
	VCPU_STAT("instruction_stpx", instruction_stpx),
	VCPU_STAT("instruction_stap", instruction_stap),
	VCPU_STAT("instruction_iske", instruction_iske),
	VCPU_STAT("instruction_ri", instruction_ri),
	VCPU_STAT("instruction_rrbe", instruction_rrbe),
	VCPU_STAT("instruction_sske", instruction_sske),
	VCPU_STAT("instruction_ipte_interlock", instruction_ipte_interlock),
	VCPU_STAT("instruction_essa", instruction_essa),
	VCPU_STAT("instruction_stsi", instruction_stsi),
	VCPU_STAT("instruction_stfl", instruction_stfl),
	VCPU_STAT("instruction_tb", instruction_tb),
	VCPU_STAT("instruction_tpi", instruction_tpi),
	VCPU_STAT("instruction_tprot", instruction_tprot),
	VCPU_STAT("instruction_tsch", instruction_tsch),
	VCPU_STAT("instruction_sthyi", instruction_sthyi),
	VCPU_STAT("instruction_sie", instruction_sie),
	VCPU_STAT("instruction_sigp_sense", instruction_sigp_sense),
	VCPU_STAT("instruction_sigp_sense_running", instruction_sigp_sense_running),
	VCPU_STAT("instruction_sigp_external_call", instruction_sigp_external_call),
	VCPU_STAT("instruction_sigp_emergency", instruction_sigp_emergency),
	VCPU_STAT("instruction_sigp_cond_emergency", instruction_sigp_cond_emergency),
	VCPU_STAT("instruction_sigp_start", instruction_sigp_start),
	VCPU_STAT("instruction_sigp_stop", instruction_sigp_stop),
	VCPU_STAT("instruction_sigp_stop_store_status", instruction_sigp_stop_store_status),
	VCPU_STAT("instruction_sigp_store_status", instruction_sigp_store_status),
	VCPU_STAT("instruction_sigp_store_adtl_status", instruction_sigp_store_adtl_status),
	VCPU_STAT("instruction_sigp_set_arch", instruction_sigp_arch),
	VCPU_STAT("instruction_sigp_set_prefix", instruction_sigp_prefix),
	VCPU_STAT("instruction_sigp_restart", instruction_sigp_restart),
	VCPU_STAT("instruction_sigp_cpu_reset", instruction_sigp_cpu_reset),
	VCPU_STAT("instruction_sigp_init_cpu_reset", instruction_sigp_init_cpu_reset),
	VCPU_STAT("instruction_sigp_unknown", instruction_sigp_unknown),
	VCPU_STAT("instruction_diag_10", diagnose_10),
	VCPU_STAT("instruction_diag_44", diagnose_44),
	VCPU_STAT("instruction_diag_9c", diagnose_9c),
	VCPU_STAT("diag_9c_ignored", diagnose_9c_ignored),
	VCPU_STAT("instruction_diag_258", diagnose_258),
	VCPU_STAT("instruction_diag_308", diagnose_308),
	VCPU_STAT("instruction_diag_500", diagnose_500),
	VCPU_STAT("instruction_diag_other", diagnose_other),
	{ NULL }
};

struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling. >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, it will require code changes, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		     sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

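/*
 * Apply a host TOD-clock delta to the guest epoch in one SIE control block.
 * With the multiple-epoch facility (ECD_MEF), the epoch index extension
 * (epdx) must also absorb the carry of the 64-bit epoch addition.
 */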
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta; we have to compensate for this by
	 * adding -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void *opaque)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

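/* Advertise a cpu feature to user space via the cpu model interface. */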
static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

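/*
 * Test the availability of one PERFORM LOCKED OPERATION function code.
 * Bit 0x100 in the function code selects "test bit" mode; condition
 * code 0 means the function is available.
 */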
static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: [function] "d" (function)
		: "cc", "0");
	return cc == 0;
}

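/*
 * Run the query function of an ".insn rrf" encoded instruction:
 * register 0 holds function code 0 (query), register 1 the address of
 * the parameter block that receives the resulting function-code bits.
 */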
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	asm volatile(
		"	lghi	0,0\n"
		"	lgr	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
		: "cc", "memory", "0", "1");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

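/*
 * Probe the host for available PLO, PTFF, CPACF, SORTL and DFLTCC
 * subfunctions and for SIE features, recording everything that KVM can
 * offer to guests. SIE features (and thus vSIE) are only advertised if
 * SIEF2, ESOP, 64-bit SCAO and IDTE are present and nesting is enabled.
 */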
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will cause
	 * resident pages to be detected as preserved.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto out;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto out;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out;

	return 0;

out:
	kvm_arch_exit();
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

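/*
 * Report whether a KVM capability is supported. Most capabilities are a
 * plain yes/no, but some report a limit instead, e.g. the maximum
 * KVM_S390_MEM_OP transfer size or the number of VCPU slots in the
 * (extended) system control area.
 */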
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
		r = 1;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	default:
		r = 0;
	}
	return r;
}

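/*
 * Walk the memslot in 1 MB (segment) steps and transfer the dirty bits
 * collected in the gmap to the memslot's dirty bitmap. A pending fatal
 * signal aborts the walk early.
 */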
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

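/* Request an operation exception intercept on all VCPUs of the VM. */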
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

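/*
 * Enable a VM-wide capability. Capabilities that extend the guest cpu
 * model or change the memory setup (vector registers, runtime
 * instrumentation, AIS, guarded storage, 1m huge pages) can only be
 * enabled before the first VCPU is created and fail with -EBUSY later.
 */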
int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * We might have to create fake 4k page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * tables. To prevent the hardware from working
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * on stale PGSTEs, we emulate these instructions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) kvm->arch.use_skf = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) kvm->arch.use_pfmfi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) r ? "(not available)" : "(success)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) case KVM_CAP_S390_USER_STSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) kvm->arch.user_stsi = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) case KVM_CAP_S390_USER_INSTR0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) kvm->arch.user_instr0 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) icpt_operexc_on_all_vcpus(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
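
/*
 * Minimal userspace sketch (not part of this file) of driving the capability
 * handling above. Assumed: vm_fd comes from KVM_CREATE_VM, <linux/kvm.h> and
 * <sys/ioctl.h> are included, and error handling is elided. Capabilities that
 * touch the facility lists (RI, GS, vector) must be enabled before the first
 * vCPU exists, as the -EBUSY checks above enforce.
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_S390_USER_STSI,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		perror("KVM_ENABLE_CAP");
 */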
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) case KVM_S390_VM_MEM_LIMIT_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) kvm->arch.mem_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) unsigned int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) case KVM_S390_VM_MEM_ENABLE_CMMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (!sclp.has_cmma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) mutex_lock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (kvm->created_vcpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) else if (kvm->mm->context.allow_gmap_hpage_1m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) kvm->arch.use_cmma = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) /* Not compatible with cmma. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) kvm->arch.use_pfmfi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) case KVM_S390_VM_MEM_CLR_CMMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (!sclp.has_cmma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (!kvm->arch.use_cmma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) mutex_lock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) idx = srcu_read_lock(&kvm->srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) s390_reset_cmma(kvm->arch.gmap->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) srcu_read_unlock(&kvm->srcu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) case KVM_S390_VM_MEM_LIMIT_SIZE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) unsigned long new_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (kvm_is_ucontrol(kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (get_user(new_limit, (u64 __user *)attr->addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) new_limit > kvm->arch.mem_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (!new_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) /* gmap_create takes last usable address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (new_limit != KVM_S390_NO_MEM_LIMIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) new_limit -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) mutex_lock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (!kvm->created_vcpus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) /* gmap_create will round the limit up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) struct gmap *new = gmap_create(current->mm, new_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (!new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) gmap_remove(kvm->arch.gmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) new->private = kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) kvm->arch.gmap = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) (void *) kvm->arch.gmap->asce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
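
/*
 * Sketch of the corresponding userspace side (assumed: vm_fd from
 * KVM_CREATE_VM, uAPI structs from <linux/kvm.h>, no vCPUs created yet,
 * error handling elided). One struct kvm_device_attr drives both the set
 * and get paths above:
 *
 *	__u64 limit = 1ULL << 31;			// 2 GiB
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
 *		.addr  = (__u64)&limit,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);	// set the limit
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);	// read it back
 *
 *	attr.attr = KVM_S390_VM_MEM_ENABLE_CMMA;	// no payload; addr unused
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */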
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) kvm_s390_vcpu_block_all(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) kvm_for_each_vcpu(i, vcpu, kvm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) kvm_s390_vcpu_crypto_setup(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /* recreate the shadow crycb by leaving the VSIE handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) kvm_s390_vcpu_unblock_all(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) mutex_lock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (!test_kvm_facility(kvm, 76)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) get_random_bytes(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) kvm->arch.crypto.crycb->aes_wrapping_key_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) kvm->arch.crypto.aes_kw = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (!test_kvm_facility(kvm, 76)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) get_random_bytes(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) kvm->arch.crypto.crycb->dea_wrapping_key_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) kvm->arch.crypto.dea_kw = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (!test_kvm_facility(kvm, 76)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) kvm->arch.crypto.aes_kw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (!test_kvm_facility(kvm, 76)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) kvm->arch.crypto.dea_kw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) case KVM_S390_VM_CRYPTO_ENABLE_APIE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (!ap_instructions_available()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) kvm->arch.crypto.apie = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) case KVM_S390_VM_CRYPTO_DISABLE_APIE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (!ap_instructions_available()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) kvm->arch.crypto.apie = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) kvm_s390_vcpu_crypto_reset_all(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
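
/*
 * Userspace sketch (assumed uAPI, error handling elided): toggling AES key
 * wrapping. Only .group and .attr matter for these crypto attributes; there
 * is no payload, since the kernel generates the wrapping key mask itself via
 * get_random_bytes() above.
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CRYPTO,
 *		.attr  = KVM_S390_VM_CRYPTO_ENABLE_AES_KW,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */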
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) int cx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) kvm_for_each_vcpu(cx, vcpu, kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) kvm_s390_sync_request(req, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * Must be called with kvm->srcu held to avoid races on memslots, and with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static int kvm_s390_vm_start_migration(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct kvm_memory_slot *ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) struct kvm_memslots *slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) unsigned long ram_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) int slotnr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) /* migration mode already enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (kvm->arch.migration_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) slots = kvm_memslots(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (!slots || !slots->used_slots)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (!kvm->arch.use_cmma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) kvm->arch.migration_mode = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /* mark all the pages in active slots as dirty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) ms = slots->memslots + slotnr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (!ms->dirty_bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * The second half of the bitmap is only used on x86,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * and would be wasted otherwise, so we put it to good
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * use here to keep track of the state of the storage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * attributes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) ram_pages += ms->npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) kvm->arch.migration_mode = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
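
/*
 * Layout note (a sketch, assuming the generic kvm_second_dirty_bitmap()
 * helper of this kernel generation): both bitmap halves live back to back
 * in one allocation, so the memset above is effectively
 *
 *	second = ms->dirty_bitmap +
 *		 kvm_dirty_bitmap_bytes(ms) / sizeof(*ms->dirty_bitmap);
 *	memset(second, 0xff, kvm_dirty_bitmap_bytes(ms));
 *
 * i.e. every page starts out with pending storage attributes, so a later
 * KVM_S390_GET_CMMA_BITS walk reports all of guest memory once migration
 * mode is on.
 */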
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * Must be called with kvm->slots_lock to avoid races with ourselves and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * kvm_s390_vm_start_migration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) static int kvm_s390_vm_stop_migration(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /* migration mode already disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (!kvm->arch.migration_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) kvm->arch.migration_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (kvm->arch.use_cmma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) static int kvm_s390_vm_set_migration(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) int res = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) mutex_lock(&kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) case KVM_S390_VM_MIGRATION_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) res = kvm_s390_vm_start_migration(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) case KVM_S390_VM_MIGRATION_STOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) res = kvm_s390_vm_stop_migration(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) mutex_unlock(&kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) static int kvm_s390_vm_get_migration(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) u64 mig = kvm->arch.migration_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
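
/*
 * Userspace sketch for the three migration attributes (assumed: vm_fd from
 * KVM_CREATE_VM, error handling elided):
 *
 *	__u64 status;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MIGRATION,
 *		.attr  = KVM_S390_VM_MIGRATION_START,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);	// enter migration mode
 *
 *	attr.attr = KVM_S390_VM_MIGRATION_STATUS;
 *	attr.addr = (__u64)&status;
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);	// status is now 1
 *
 *	attr.attr = KVM_S390_VM_MIGRATION_STOP;
 *	attr.addr = 0;
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);	// leave migration mode
 */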
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) struct kvm_s390_vm_tod_clock gtod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) kvm_s390_set_tod_clock(kvm, &gtod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) gtod.epoch_idx, gtod.tod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) u8 gtod_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (copy_from_user(&gtod_high, (void __user *)attr->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) sizeof(gtod_high)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (gtod_high != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) struct kvm_s390_vm_tod_clock gtod = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) sizeof(gtod.tod)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) kvm_s390_set_tod_clock(kvm, &gtod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (attr->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) case KVM_S390_VM_TOD_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) ret = kvm_s390_set_tod_ext(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) case KVM_S390_VM_TOD_HIGH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) ret = kvm_s390_set_tod_high(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) case KVM_S390_VM_TOD_LOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) ret = kvm_s390_set_tod_low(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
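
/*
 * Userspace sketch (assumed uAPI; new_tod is a hypothetical 64-bit TOD
 * value): the extended attribute sets the whole guest TOD, including the
 * epoch index, in one call. KVM_S390_VM_TOD_HIGH/LOW remain for older
 * userspace that sets the two halves separately.
 *
 *	struct kvm_s390_vm_tod_clock gtod = {
 *		.epoch_idx = 0,
 *		.tod = new_tod,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_EXT,
 *		.addr  = (__u64)&gtod,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */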
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) static void kvm_s390_get_tod_clock(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) struct kvm_s390_vm_tod_clock *gtod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct kvm_s390_tod_clock_ext htod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) get_tod_clock_ext((char *)&htod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) gtod->tod = htod.tod + kvm->arch.epoch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) gtod->epoch_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (test_kvm_facility(kvm, 139)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (gtod->tod < htod.tod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) gtod->epoch_idx += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
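
/*
 * Worked example for the carry handling above: gtod->tod is computed modulo
 * 2^64, so with htod.tod = 0xf000000000000000 and kvm->arch.epoch =
 * 0x2000000000000000 the addition wraps to 0x1000000000000000. The result
 * being smaller than htod.tod is exactly the overflow condition, so the
 * carry out of the 64-bit sum is propagated into the 8-bit epoch index
 * extension (epoch_idx += 1).
 */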
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) struct kvm_s390_vm_tod_clock gtod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) memset(&gtod, 0, sizeof(gtod));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) kvm_s390_get_tod_clock(kvm, &gtod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) gtod.epoch_idx, gtod.tod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) u8 gtod_high = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (copy_to_user((void __user *)attr->addr, &gtod_high,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) sizeof(gtod_high)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) u64 gtod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) gtod = kvm_s390_get_tod_clock_fast(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (attr->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) case KVM_S390_VM_TOD_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) ret = kvm_s390_get_tod_ext(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) case KVM_S390_VM_TOD_HIGH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) ret = kvm_s390_get_tod_high(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) case KVM_S390_VM_TOD_LOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) ret = kvm_s390_get_tod_low(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) struct kvm_s390_vm_cpu_processor *proc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) u16 lowest_ibc, unblocked_ibc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) mutex_lock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (kvm->created_vcpus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) proc = kzalloc(sizeof(*proc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (!proc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (!copy_from_user(proc, (void __user *)attr->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) sizeof(*proc))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) kvm->arch.model.cpuid = proc->cpuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) lowest_ibc = sclp.ibc >> 16 & 0xfff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) unblocked_ibc = sclp.ibc & 0xfff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (lowest_ibc && proc->ibc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (proc->ibc > unblocked_ibc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) kvm->arch.model.ibc = unblocked_ibc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) else if (proc->ibc < lowest_ibc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) kvm->arch.model.ibc = lowest_ibc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) kvm->arch.model.ibc = proc->ibc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) memcpy(kvm->arch.model.fac_list, proc->fac_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) S390_ARCH_FAC_LIST_SIZE_BYTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) kvm->arch.model.ibc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) kvm->arch.model.cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) kvm->arch.model.fac_list[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) kvm->arch.model.fac_list[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) kvm->arch.model.fac_list[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) kfree(proc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
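
/*
 * Worked example for the IBC clamping above, under the layout implied by
 * the shifts (lowest supported IBC in bits 16-27 of sclp.ibc, highest
 * unblocked IBC in bits 0-11): with sclp.ibc = 0x02300240, lowest_ibc is
 * 0x230 and unblocked_ibc is 0x240. A requested proc->ibc of 0x250 is
 * clamped down to 0x240, 0x220 is raised to 0x230, and 0x235 is taken
 * as-is.
 */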
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static int kvm_s390_set_processor_feat(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) struct kvm_s390_vm_cpu_feat data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (!bitmap_subset((unsigned long *) data.feat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) kvm_s390_available_cpu_feat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) KVM_S390_VM_CPU_FEAT_NR_BITS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) mutex_lock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (kvm->created_vcpus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) KVM_S390_VM_CPU_FEAT_NR_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) data.feat[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) data.feat[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) data.feat[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) mutex_lock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (kvm->created_vcpus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) sizeof(struct kvm_s390_vm_cpu_subfunc))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) int ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) case KVM_S390_VM_CPU_PROCESSOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) ret = kvm_s390_set_processor(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) case KVM_S390_VM_CPU_PROCESSOR_FEAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) ret = kvm_s390_set_processor_feat(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) ret = kvm_s390_set_processor_subfunc(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
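
/*
 * Typical userspace flow for this attribute group (a sketch; assumed: vm_fd
 * from KVM_CREATE_VM, no vCPUs created yet, structs from <linux/kvm.h>,
 * error handling elided): query the host model via KVM_S390_VM_CPU_MACHINE,
 * then pin the guest to it with KVM_S390_VM_CPU_PROCESSOR before the first
 * KVM_CREATE_VCPU.
 *
 *	struct kvm_s390_vm_cpu_machine mach;
 *	struct kvm_s390_vm_cpu_processor proc;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CPU_MODEL,
 *		.attr  = KVM_S390_VM_CPU_MACHINE,
 *		.addr  = (__u64)&mach,
 *	};
 *
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 *
 *	proc.cpuid = mach.cpuid;
 *	proc.ibc   = mach.ibc & 0xfff;		// unblocked IBC of the host
 *	memcpy(proc.fac_list, mach.fac_list, sizeof(proc.fac_list));
 *
 *	attr.attr = KVM_S390_VM_CPU_PROCESSOR;
 *	attr.addr = (__u64)&proc;
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */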
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) struct kvm_s390_vm_cpu_processor *proc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) proc = kzalloc(sizeof(*proc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (!proc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) proc->cpuid = kvm->arch.model.cpuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) proc->ibc = kvm->arch.model.ibc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) memcpy(&proc->fac_list, kvm->arch.model.fac_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) S390_ARCH_FAC_LIST_SIZE_BYTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) kvm->arch.model.ibc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) kvm->arch.model.cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) kvm->arch.model.fac_list[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) kvm->arch.model.fac_list[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) kvm->arch.model.fac_list[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) kfree(proc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) struct kvm_s390_vm_cpu_machine *mach;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) mach = kzalloc(sizeof(*mach), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (!mach) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) get_cpu_id((struct cpuid *) &mach->cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) mach->ibc = sclp.ibc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) S390_ARCH_FAC_LIST_SIZE_BYTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) sizeof(S390_lowcore.stfle_fac_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) kvm->arch.model.ibc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) kvm->arch.model.cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) mach->fac_mask[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) mach->fac_mask[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) mach->fac_mask[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) mach->fac_list[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) mach->fac_list[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) mach->fac_list[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) kfree(mach);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) static int kvm_s390_get_processor_feat(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) struct kvm_s390_vm_cpu_feat data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) KVM_S390_VM_CPU_FEAT_NR_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) data.feat[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) data.feat[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) data.feat[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) static int kvm_s390_get_machine_feat(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) struct kvm_s390_vm_cpu_feat data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) bitmap_copy((unsigned long *) data.feat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) kvm_s390_available_cpu_feat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) KVM_S390_VM_CPU_FEAT_NR_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) data.feat[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) data.feat[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) data.feat[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) sizeof(struct kvm_s390_vm_cpu_subfunc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) sizeof(struct kvm_s390_vm_cpu_subfunc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
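/*
 * Illustrative note with a hypothetical example: each field of struct
 * kvm_s390_vm_cpu_subfunc is the raw query status word of one CPACF
 * instruction (16 bytes for most, 32 bytes for PLO, SORTL and DFLTCC),
 * which is why the trace lines above dump two or four unsigned longs per
 * function. Userspace fetches the whole structure in one call:
 *
 *	struct kvm_s390_vm_cpu_subfunc sf;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CPU_MODEL,
 *		.attr  = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
 *		.addr  = (__u64)&sf,
 *	};
 *
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 *
 * The meaning of the individual bits (e.g. which KM function codes are
 * installed) is defined by the Principles of Operation, not by this file.
 */
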
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) int ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) case KVM_S390_VM_CPU_PROCESSOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) ret = kvm_s390_get_processor(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) case KVM_S390_VM_CPU_MACHINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) ret = kvm_s390_get_machine(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) case KVM_S390_VM_CPU_PROCESSOR_FEAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) ret = kvm_s390_get_processor_feat(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) case KVM_S390_VM_CPU_MACHINE_FEAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) ret = kvm_s390_get_machine_feat(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) ret = kvm_s390_get_processor_subfunc(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) ret = kvm_s390_get_machine_subfunc(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) case KVM_S390_VM_MEM_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) ret = kvm_s390_set_mem_control(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) case KVM_S390_VM_TOD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) ret = kvm_s390_set_tod(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) case KVM_S390_VM_CPU_MODEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) ret = kvm_s390_set_cpu_model(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) case KVM_S390_VM_CRYPTO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) ret = kvm_s390_vm_set_crypto(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) case KVM_S390_VM_MIGRATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) ret = kvm_s390_vm_set_migration(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) case KVM_S390_VM_MEM_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) ret = kvm_s390_get_mem_control(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) case KVM_S390_VM_TOD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) ret = kvm_s390_get_tod(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) case KVM_S390_VM_CPU_MODEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) ret = kvm_s390_get_cpu_model(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) case KVM_S390_VM_MIGRATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) ret = kvm_s390_vm_get_migration(kvm, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) case KVM_S390_VM_MEM_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) case KVM_S390_VM_MEM_ENABLE_CMMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) case KVM_S390_VM_MEM_CLR_CMMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) ret = sclp.has_cmma ? 0 : -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) case KVM_S390_VM_MEM_LIMIT_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) case KVM_S390_VM_TOD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) case KVM_S390_VM_TOD_LOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) case KVM_S390_VM_TOD_HIGH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) case KVM_S390_VM_CPU_MODEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) case KVM_S390_VM_CPU_PROCESSOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) case KVM_S390_VM_CPU_MACHINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) case KVM_S390_VM_CPU_PROCESSOR_FEAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) case KVM_S390_VM_CPU_MACHINE_FEAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) case KVM_S390_VM_CRYPTO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) case KVM_S390_VM_CRYPTO_ENABLE_APIE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) case KVM_S390_VM_CRYPTO_DISABLE_APIE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) ret = ap_instructions_available() ? 0 : -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) case KVM_S390_VM_MIGRATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
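/*
 * Illustrative sketch (hypothetical userspace, assuming an open vm_fd):
 * the has_attr handler above backs KVM_HAS_DEVICE_ATTR, which lets
 * userspace probe for support before attempting a get or set:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CRYPTO,
 *		.attr  = KVM_S390_VM_CRYPTO_ENABLE_APIE,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0)
 *		ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * A zero return means the attribute is wired up (for APIE: the AP
 * instructions are available); otherwise the ioctl fails with ENXIO.
 */
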
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) uint8_t *keys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) uint64_t hva;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) int srcu_idx, i, r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (args->flags != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) /* Is this guest using storage keys? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) if (!mm_uses_skeys(current->mm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) return KVM_S390_GET_SKEYS_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) /* Enforce sane limit on memory allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (!keys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) mmap_read_lock(current->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) srcu_idx = srcu_read_lock(&kvm->srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) for (i = 0; i < args->count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) hva = gfn_to_hva(kvm, args->start_gfn + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) if (kvm_is_error_hva(hva)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) r = get_guest_storage_key(current->mm, hva, &keys[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) srcu_read_unlock(&kvm->srcu, srcu_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) mmap_read_unlock(current->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) if (!r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) sizeof(uint8_t) * args->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) kvfree(keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
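/*
 * Illustrative sketch of the caller side (hypothetical): the function
 * above implements the KVM_S390_GET_SKEYS VM ioctl. Reading the keys of
 * the first 256 guest pages could look like this:
 *
 *	uint8_t keys[256];
 *	struct kvm_s390_skeys skeys = {
 *		.start_gfn     = 0,
 *		.count         = sizeof(keys),
 *		.skeydata_addr = (__u64)keys,
 *	};
 *	int rc = ioctl(vm_fd, KVM_S390_GET_SKEYS, &skeys);
 *
 * A return value of KVM_S390_GET_SKEYS_NONE means the guest never
 * enabled storage keys, so there is nothing to save (e.g. for
 * migration).
 */
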
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) uint8_t *keys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) uint64_t hva;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) int srcu_idx, i, r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) bool unlocked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) if (args->flags != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) /* Enforce sane limit on memory allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) if (!keys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) sizeof(uint8_t) * args->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) /* Enable storage key handling for the guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) r = s390_enable_skey();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) mmap_read_lock(current->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) srcu_idx = srcu_read_lock(&kvm->srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) while (i < args->count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) unlocked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) hva = gfn_to_hva(kvm, args->start_gfn + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (kvm_is_error_hva(hva)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) /* Lowest order bit is reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (keys[i] & 0x01) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) r = set_guest_storage_key(current->mm, hva, keys[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) r = fixup_user_fault(current->mm, hva,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) FAULT_FLAG_WRITE, &unlocked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) if (!r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) srcu_read_unlock(&kvm->srcu, srcu_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) mmap_read_unlock(current->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) kvfree(keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
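/*
 * For reference, the architected layout of one storage key byte as
 * accepted by KVM_S390_SET_SKEYS (sketch; bit numbers are LSB-0 here):
 *
 *	  7   6   5   4    3   2   1   0
 *	+----------------+---+---+---+---+
 *	|      ACC       | F | R | C | 0 |
 *	+----------------+---+---+---+---+
 *
 * ACC is the access-control value, F fetch protection, R reference and
 * C change. Bit 0 is reserved, which is why the loop above rejects any
 * key with that bit set before calling set_guest_storage_key().
 */
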
/*
 * Base address and length must be sent at the start of each block;
 * it is therefore cheaper to send some clean data, as long as the run
 * of clean values is shorter than the size of two longs.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) #define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* For consistency, reuse the storage key limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
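
/*
 * Worked example for the constant above: on s390x, 2 * sizeof(void *) is
 * 16 bytes, and starting a new block costs a fresh base address plus
 * length, i.e. two 8-byte values. Each CMMA value, clean or not, costs
 * one byte. A run of up to 15 clean pages is therefore cheaper to send
 * as literal bytes than as a block break, so kvm_s390_get_cmma() below
 * only gives up when the next dirty page is more than 16 gfns away.
 */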
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) * Similar to gfn_to_memslot, but returns the index of a memslot also when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) * address falls in a hole. In that case the index of one of the memslots
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) * bordering the hole is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) int start = 0, end = slots->used_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) int slot = atomic_read(&slots->lru_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) struct kvm_memory_slot *memslots = slots->memslots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) if (gfn >= memslots[slot].base_gfn &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) gfn < memslots[slot].base_gfn + memslots[slot].npages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) return slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) while (start < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) slot = start + (end - start) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) if (gfn >= memslots[slot].base_gfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) end = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) start = slot + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) if (start >= slots->used_slots)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) return slots->used_slots - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) if (gfn >= memslots[start].base_gfn &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) gfn < memslots[start].base_gfn + memslots[start].npages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) atomic_set(&slots->lru_slot, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) return start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
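/*
 * Illustrative example with a hypothetical slot layout: the memslot
 * array is sorted by descending base_gfn, so with
 *
 *	memslots[0] = { .base_gfn = 0x800, .npages = 0x100 }
 *	memslots[1] = { .base_gfn = 0x000, .npages = 0x200 }
 *
 * a lookup of gfn 0x500, which falls in the hole 0x200..0x7ff, returns
 * index 1: the binary search converges on the slot with the largest
 * base_gfn that is still <= gfn. kvm_s390_next_dirty_cmma() below relies
 * on getting a bordering slot back instead of "no slot" so it can keep
 * scanning from there.
 */
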
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) u8 *res, unsigned long bufsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) unsigned long pgstev, hva, cur_gfn = args->start_gfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) args->count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) while (args->count < bufsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) hva = gfn_to_hva(kvm, cur_gfn);
		/*
		 * Return an error if the very first value is invalid;
		 * once at least one value has been copied, report success
		 * with a short count instead.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) if (kvm_is_error_hva(hva))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) return args->count ? 0 : -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) if (get_pgste(kvm->mm, hva, &pgstev) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) pgstev = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) res[args->count++] = (pgstev >> 24) & 0x43;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) cur_gfn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
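/*
 * Note on the encoding above: shifting the PGSTE right by 24 moves the
 * guest usage state into bits 0-1 of the result byte and the NODAT
 * indication into bit 6; the 0x43 mask is exactly
 * (_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT) >> 24. No other PGSTE
 * content is exported through this interface.
 */
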
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) unsigned long cur_gfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) struct kvm_memory_slot *ms = slots->memslots + slotidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) unsigned long ofs = cur_gfn - ms->base_gfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) if (ms->base_gfn + ms->npages <= cur_gfn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) slotidx--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) /* If we are above the highest slot, wrap around */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) if (slotidx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) slotidx = slots->used_slots - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) ms = slots->memslots + slotidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) ofs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) while ((slotidx > 0) && (ofs >= ms->npages)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) slotidx--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) ms = slots->memslots + slotidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) return ms->base_gfn + ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
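/*
 * Reading aid (illustrative): because the slots are sorted by descending
 * base_gfn, slotidx-- moves the scan to the next-higher guest address
 * range. With the two-slot layout sketched above gfn_to_memslot_approx(),
 * a scan that exhausts memslots[1] (0x000..0x1ff) continues in
 * memslots[0] (0x800..0x8ff).
 */
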
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) u8 *res, unsigned long bufsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) struct kvm_memslots *slots = kvm_memslots(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) struct kvm_memory_slot *ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) if (unlikely(!slots->used_slots))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) ms = gfn_to_memslot(kvm, cur_gfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) args->count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) args->start_gfn = cur_gfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) if (!ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) while (args->count < bufsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) hva = gfn_to_hva(kvm, cur_gfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if (kvm_is_error_hva(hva))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) /* Decrement only if we actually flipped the bit to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) atomic64_dec(&kvm->arch.cmma_dirty_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) if (get_pgste(kvm->mm, hva, &pgstev) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) pgstev = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) /* Save the value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) res[args->count++] = (pgstev >> 24) & 0x43;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) /* If the next bit is too far away, stop. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) /* If we reached the previous "next", find the next one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) if (cur_gfn == next_gfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) /* Reached the end of memory or of the buffer, stop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) if ((next_gfn >= mem_end) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) (next_gfn - args->start_gfn >= bufsize))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) cur_gfn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) /* Reached the end of the current memslot, take the next one. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) if (cur_gfn - ms->base_gfn >= ms->npages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) ms = gfn_to_memslot(kvm, cur_gfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) if (!ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)
/*
 * This function searches for the next page with dirty CMMA attributes
 * and saves those attributes into the buffer, stopping either at the
 * end of the buffer or when a run of at least KVM_S390_MAX_BIT_DISTANCE
 * clean bits is found; no trailing clean bytes are saved.
 * If no dirty bits were found, or if CMMA is not enabled or in use, the
 * output buffer will indicate 0 as length.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) static int kvm_s390_get_cmma_bits(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) struct kvm_s390_cmma_log *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) unsigned long bufsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) int srcu_idx, peek, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) u8 *values;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) if (!kvm->arch.use_cmma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) /* Invalid/unsupported flags were specified */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) if (args->flags & ~KVM_S390_CMMA_PEEK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) return -EINVAL;
	/* A query for dirty pages (without peeking) requires migration mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) peek = !!(args->flags & KVM_S390_CMMA_PEEK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) if (!peek && !kvm->arch.migration_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) /* CMMA is disabled or was not used, or the buffer has length zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) if (!bufsize || !kvm->mm->context.uses_cmm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) memset(args, 0, sizeof(*args));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) /* We are not peeking, and there are no dirty pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) memset(args, 0, sizeof(*args));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) values = vmalloc(bufsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) if (!values)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) mmap_read_lock(kvm->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) srcu_idx = srcu_read_lock(&kvm->srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) if (peek)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) srcu_read_unlock(&kvm->srcu, srcu_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) mmap_read_unlock(kvm->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) if (kvm->arch.migration_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) args->remaining = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) if (copy_to_user((void __user *)args->values, values, args->count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) vfree(values);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
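/*
 * Illustrative sketch of the userspace side (hypothetical migration
 * loop; send_block() and vm_fd are placeholders): KVM_S390_GET_CMMA_BITS
 * is called repeatedly, advancing start_gfn past the values just read,
 * until "remaining" drops to zero:
 *
 *	uint8_t buf[4096];
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count     = sizeof(buf),
 *		.flags     = 0,
 *		.values    = (__u64)buf,
 *	};
 *
 *	do {
 *		if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log))
 *			break;
 *		send_block(log.start_gfn, buf, log.count);
 *		log.start_gfn += log.count;
 *		log.count = sizeof(buf);
 *	} while (log.remaining);
 *
 * With flags == 0 (no KVM_S390_CMMA_PEEK) the dirty bits are consumed,
 * which requires migration mode to be enabled as checked above.
 */
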
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) * This function sets the CMMA attributes for the given pages. If the input
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) * buffer has zero length, no action is taken, otherwise the attributes are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) * set and the mm->context.uses_cmm flag is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) static int kvm_s390_set_cmma_bits(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) const struct kvm_s390_cmma_log *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) unsigned long hva, mask, pgstev, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) uint8_t *bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) int srcu_idx, r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) mask = args->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) if (!kvm->arch.use_cmma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) /* invalid/unsupported flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) if (args->flags != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) /* Enforce sane limit on memory allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) if (args->count > KVM_S390_CMMA_SIZE_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) /* Nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) if (args->count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) bits = vmalloc(array_size(sizeof(*bits), args->count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) if (!bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) r = copy_from_user(bits, (void __user *)args->values, args->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) mmap_read_lock(kvm->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) srcu_idx = srcu_read_lock(&kvm->srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) for (i = 0; i < args->count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) hva = gfn_to_hva(kvm, args->start_gfn + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) if (kvm_is_error_hva(hva)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) pgstev = bits[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) pgstev = pgstev << 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) set_pgste_bits(kvm->mm, hva, mask, pgstev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) srcu_read_unlock(&kvm->srcu, srcu_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) mmap_read_unlock(kvm->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) if (!kvm->mm->context.uses_cmm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) mmap_write_lock(kvm->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) kvm->mm->context.uses_cmm = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) mmap_write_unlock(kvm->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) vfree(bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
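/*
 * Sketch of the receiving side (hypothetical; block_gfn, block_len and
 * buf are placeholders): the destination of a migration replays the
 * stream with KVM_S390_SET_CMMA_BITS, using the mask to declare which
 * bits of each transmitted value are valid:
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = block_gfn,
 *		.count     = block_len,
 *		.mask      = ~0ULL,
 *		.values    = (__u64)buf,
 *	};
 *	ioctl(vm_fd, KVM_S390_SET_CMMA_BITS, &log);
 *
 * As the code above shows, the mask is clamped to
 * _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT in any case, so userspace
 * cannot touch other PGSTE bits no matter what it passes.
 */
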
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) u16 rc, rrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
	/*
	 * We ignore failures and try to destroy as many CPUs as possible.
	 * At the same time we must not free the assigned resources when
	 * this fails, as the ultravisor still has access to that memory.
	 * So kvm_s390_pv_destroy_cpu can leave an intentional ("wanted")
	 * memory leak behind.
	 * We do, however, want to return the rc and rrc of the first
	 * failure.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) kvm_for_each_vcpu(i, vcpu, kvm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) mutex_lock(&vcpu->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) if (kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc) && !ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) *rcp = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) *rrcp = rrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) mutex_unlock(&vcpu->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) int i, r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) u16 dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) kvm_for_each_vcpu(i, vcpu, kvm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) mutex_lock(&vcpu->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) mutex_unlock(&vcpu->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) u16 dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) void __user *argp = (void __user *)cmd->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) switch (cmd->cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) case KVM_PV_ENABLE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) if (kvm_s390_pv_is_protected(kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
		/*
		 * FMT 4 SIE needs esca. As we never switch back to bsca
		 * from esca, we need no cleanup in the error cases below.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) r = sca_switch_to_extended(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) mmap_write_lock(current->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) r = gmap_mark_unmergeable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) mmap_write_unlock(current->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) /* we need to block service interrupts from now on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) case KVM_PV_DISABLE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) if (!kvm_s390_pv_is_protected(kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
		/*
		 * If a CPU could not be destroyed, destroying the VM will
		 * also fail. There is no point in trying; instead, return
		 * the rc and rrc of the first CPU whose destruction failed.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) /* no need to block service interrupts any more */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) }
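	/*
	 * Illustrative ordering (hypothetical userspace sequence): a
	 * protected guest is typically brought up by issuing, in order,
	 *
	 *	struct kvm_pv_cmd cmd = { .cmd = KVM_PV_ENABLE };
	 *	ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
	 *	cmd.cmd = KVM_PV_SET_SEC_PARMS;	(+ cmd.data)
	 *	cmd.cmd = KVM_PV_UNPACK;	(+ cmd.data)
	 *	cmd.cmd = KVM_PV_VERIFY;
	 *
	 * with one KVM_S390_PV_COMMAND ioctl per step, matching the case
	 * labels of this switch. On failure, cmd.rc and cmd.rrc carry the
	 * ultravisor return codes for diagnostics.
	 */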
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) case KVM_PV_SET_SEC_PARMS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) struct kvm_s390_pv_sec_parm parms = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) void *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) if (!kvm_s390_pv_is_protected(kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) if (copy_from_user(&parms, argp, sizeof(parms)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) /* Currently restricted to 8KB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) if (parms.length > PAGE_SIZE * 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) hdr = vmalloc(parms.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) if (!hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) if (!copy_from_user(hdr, (void __user *)parms.origin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) parms.length))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) &cmd->rc, &cmd->rrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) vfree(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) case KVM_PV_UNPACK: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) struct kvm_s390_pv_unp unp = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) if (copy_from_user(&unp, argp, sizeof(unp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) &cmd->rc, &cmd->rrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) case KVM_PV_VERIFY: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) if (!kvm_s390_pv_is_protected(kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) cmd->rrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) case KVM_PV_PREP_RESET: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) if (!kvm_s390_pv_is_protected(kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) cmd->rc, cmd->rrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) case KVM_PV_UNSHARE_ALL: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) if (!kvm_s390_pv_is_protected(kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) cmd->rc, cmd->rrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) r = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) long kvm_arch_vm_ioctl(struct file *filp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) unsigned int ioctl, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) struct kvm *kvm = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) void __user *argp = (void __user *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) struct kvm_device_attr attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) switch (ioctl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) case KVM_S390_INTERRUPT: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) struct kvm_s390_interrupt s390int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) if (copy_from_user(&s390int, argp, sizeof(s390int)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) r = kvm_s390_inject_vm(kvm, &s390int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) case KVM_CREATE_IRQCHIP: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) struct kvm_irq_routing_entry routing;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) if (kvm->arch.use_irqchip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) /* Set up dummy routing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) memset(&routing, 0, sizeof(routing));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) r = kvm_set_irq_routing(kvm, &routing, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) case KVM_SET_DEVICE_ATTR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) r = kvm_s390_vm_set_attr(kvm, &attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) case KVM_GET_DEVICE_ATTR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) r = kvm_s390_vm_get_attr(kvm, &attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) case KVM_HAS_DEVICE_ATTR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) r = kvm_s390_vm_has_attr(kvm, &attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) case KVM_S390_GET_SKEYS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) struct kvm_s390_skeys args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) if (copy_from_user(&args, argp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) sizeof(struct kvm_s390_skeys)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) r = kvm_s390_get_skeys(kvm, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) case KVM_S390_SET_SKEYS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) struct kvm_s390_skeys args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) if (copy_from_user(&args, argp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) sizeof(struct kvm_s390_skeys)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) r = kvm_s390_set_skeys(kvm, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) case KVM_S390_GET_CMMA_BITS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) struct kvm_s390_cmma_log args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) if (copy_from_user(&args, argp, sizeof(args)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) mutex_lock(&kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) r = kvm_s390_get_cmma_bits(kvm, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) mutex_unlock(&kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) if (!r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) r = copy_to_user(argp, &args, sizeof(args));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) case KVM_S390_SET_CMMA_BITS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) struct kvm_s390_cmma_log args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) if (copy_from_user(&args, argp, sizeof(args)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) mutex_lock(&kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) r = kvm_s390_set_cmma_bits(kvm, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) mutex_unlock(&kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) case KVM_S390_PV_COMMAND: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) struct kvm_pv_cmd args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) /* protected virtualization implies user-controlled cpu state via sigp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) kvm->arch.user_cpu_state_ctrl = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) if (!is_prot_virt_host()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) if (copy_from_user(&args, argp, sizeof(args))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) if (args.flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) mutex_lock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) r = kvm_s390_handle_pv(kvm, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) if (copy_to_user(argp, &args, sizeof(args))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) r = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) r = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495)
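/*
 * Query the AP configuration (QCI), if the AP instructions are available,
 * to find out whether the AP extended addressing (APXA) facility is
 * installed.
 */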
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) static int kvm_s390_apxa_installed(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) struct ap_config_info info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) if (ap_instructions_available()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) if (ap_qci(&info) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) return info.apxa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) * The format of the crypto control block (CRYCB) is specified in the 3 low
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) * order bits of the CRYCB designation (CRYCBD) field as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) * AP extended addressing (APXA) facility is installed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) * Format 1: The APXA facility is not installed but the MSAX3 facility is.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) * Format 2: Both the APXA and MSAX3 facilities are installed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) static void kvm_s390_set_crycb_format(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) /* Clear the CRYCB format bits - i.e., set format 0 by default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) /* Check whether MSAX3 is installed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) if (!test_kvm_facility(kvm, 76))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) if (kvm_s390_apxa_installed())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
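/*
 * kvm_arch_crypto_set_masks - set the AP masks in the guest's CRYCB
 * @kvm: the target guest
 * @apm: the mask identifying the accessible AP adapters
 * @aqm: the mask identifying the accessible AP domains
 * @adm: the mask identifying the accessible AP control domains
 *
 * All VCPUs are blocked while the masks are copied into the CRYCB, then
 * kicked with KVM_REQ_VSIE_RESTART so each recreates its shadow CRYCB.
 */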
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) unsigned long *aqm, unsigned long *adm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) mutex_lock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) kvm_s390_vcpu_block_all(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) memcpy(crycb->apcb1.apm, apm, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) apm[0], apm[1], apm[2], apm[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) memcpy(crycb->apcb1.aqm, aqm, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) aqm[0], aqm[1], aqm[2], aqm[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) memcpy(crycb->apcb1.adm, adm, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) adm[0], adm[1], adm[2], adm[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) case CRYCB_FORMAT1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) case CRYCB_FORMAT0: /* fall through - both use APCB0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) memcpy(crycb->apcb0.apm, apm, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) memcpy(crycb->apcb0.aqm, aqm, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) memcpy(crycb->apcb0.adm, adm, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) apm[0], *((unsigned short *)aqm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) *((unsigned short *)adm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) default: /* Cannot happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) /* recreate the shadow crycb for each vcpu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) kvm_s390_vcpu_unblock_all(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
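/*
 * Clear the APCB0 and APCB1 masks in the guest's CRYCB and kick all
 * VCPUs so that they recreate their shadow CRYCB.
 */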
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) void kvm_arch_crypto_clear_masks(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) mutex_lock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) kvm_s390_vcpu_block_all(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) memset(&kvm->arch.crypto.crycb->apcb0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) sizeof(kvm->arch.crypto.crycb->apcb0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) memset(&kvm->arch.crypto.crycb->apcb1, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) sizeof(kvm->arch.crypto.crycb->apcb1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) /* recreate the shadow crycb for each vcpu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) kvm_s390_vcpu_unblock_all(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
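/*
 * Use the host CPU id for the guest, but overwrite the version field
 * with 0xff to mark the CPU id as that of a virtual CPU.
 */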
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) static u64 kvm_s390_get_initial_cpuid(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) struct cpuid cpuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) get_cpu_id(&cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) cpuid.version = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) return *((u64 *) &cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599)
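/*
 * Set up the CRYCB and its format. If MSAX3 is available, enable the
 * AES and DEA protected key functions by default and generate random
 * wrapping key masks for both.
 */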
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) static void kvm_s390_crypto_init(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) kvm_s390_set_crycb_format(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) if (!test_kvm_facility(kvm, 76))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) /* Enable AES/DEA protected key functions by default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) kvm->arch.crypto.aes_kw = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) kvm->arch.crypto.dea_kw = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616)
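/*
 * Free the SCA: an extended SCA is allocated with alloc_pages_exact(),
 * while a basic SCA occupies (a part of) a single page.
 */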
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) static void sca_dispose(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) if (kvm->arch.use_esca)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) free_page((unsigned long)(kvm->arch.sca));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) kvm->arch.sca = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) gfp_t alloc_flags = GFP_KERNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) int i, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) char debug_name[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) static unsigned long sca_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) #ifdef CONFIG_KVM_S390_UCONTROL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) if (type & ~KVM_VM_S390_UCONTROL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) if (type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) rc = s390_enable_sie();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
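/*
 * Without the 64-bit SCA origin (64bscao) facility, SIE can only
 * address an SCA below 2 GB, so allocate it from ZONE_DMA (which is
 * below 2 GB on s390).
 */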
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) if (!sclp.has_64bscao)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) alloc_flags |= GFP_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) rwlock_init(&kvm->arch.sca_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) /* start with basic SCA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) if (!kvm->arch.sca)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) goto out_err;
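/*
 * Stagger each (sub-page sized) basic SCA within its page; this
 * presumably spreads the heavily used SCA fields of different VMs
 * across distinct cache lines.
 */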
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) mutex_lock(&kvm_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) sca_offset += 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) sca_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) kvm->arch.sca = (struct bsca_block *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) ((char *) kvm->arch.sca + sca_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) mutex_unlock(&kvm_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) sprintf(debug_name, "kvm-%u", current->pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) if (!kvm->arch.dbf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) kvm->arch.sie_page2 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) if (!kvm->arch.sie_page2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) kvm->arch.sie_page2->kvm = kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679)
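/*
 * The facility mask is the host's STFLE facility list, limited to the
 * facilities KVM supports (base plus extended); the initial guest
 * facility list is further reduced to the base facilities.
 */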
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) for (i = 0; i < kvm_s390_fac_size(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) (kvm_s390_fac_base[i] |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) kvm_s390_fac_ext[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) kvm_s390_fac_base[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) /* we are always in czam mode - even on pre-z14 machines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) set_kvm_facility(kvm->arch.model.fac_mask, 138);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) set_kvm_facility(kvm->arch.model.fac_list, 138);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) /* we emulate STHYI in kvm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) set_kvm_facility(kvm->arch.model.fac_mask, 74);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) set_kvm_facility(kvm->arch.model.fac_list, 74);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) if (MACHINE_HAS_TLB_GUEST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) set_kvm_facility(kvm->arch.model.fac_mask, 147);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) set_kvm_facility(kvm->arch.model.fac_list, 147);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) if (css_general_characteristics.aiv && test_facility(65))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) set_kvm_facility(kvm->arch.model.fac_mask, 65);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) kvm->arch.model.ibc = sclp.ibc & 0x0fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) kvm_s390_crypto_init(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) mutex_init(&kvm->arch.float_int.ais_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) spin_lock_init(&kvm->arch.float_int.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) for (i = 0; i < FIRQ_LIST_COUNT; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) init_waitqueue_head(&kvm->arch.ipte_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) mutex_init(&kvm->arch.ipte_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) VM_EVENT(kvm, 3, "vm created with type %lu", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) if (type & KVM_VM_S390_UCONTROL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) kvm->arch.gmap = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) if (sclp.hamax == U64_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) kvm->arch.mem_limit = TASK_SIZE_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) sclp.hamax + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) if (!kvm->arch.gmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) kvm->arch.gmap->private = kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) kvm->arch.gmap->pfault_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) kvm->arch.use_pfmfi = sclp.has_pfmfi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) kvm->arch.use_skf = sclp.has_skey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) spin_lock_init(&kvm->arch.start_stop_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) kvm_s390_vsie_init(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) if (use_gisa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) kvm_s390_gisa_init(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) free_page((unsigned long)kvm->arch.sie_page2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) debug_unregister(kvm->arch.dbf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) sca_dispose(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) KVM_EVENT(3, "creation of vm failed: %d", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) u16 rc, rrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) VCPU_EVENT(vcpu, 3, "%s", "free cpu");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) kvm_s390_clear_local_irqs(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) kvm_clear_async_pf_completion_queue(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) if (!kvm_is_ucontrol(vcpu->kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) sca_del_vcpu(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) if (kvm_is_ucontrol(vcpu->kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) gmap_remove(vcpu->arch.gmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) if (vcpu->kvm->arch.use_cmma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) kvm_s390_vcpu_unsetup_cmma(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) /* We cannot hold the vcpu mutex here; we are already dying */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) if (kvm_s390_pv_cpu_get_handle(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) free_page((unsigned long)(vcpu->arch.sie_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) static void kvm_free_vcpus(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) kvm_for_each_vcpu(i, vcpu, kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) kvm_vcpu_destroy(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) mutex_lock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) kvm->vcpus[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) atomic_set(&kvm->online_vcpus, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) void kvm_arch_destroy_vm(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) u16 rc, rrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) kvm_free_vcpus(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) sca_dispose(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) kvm_s390_gisa_destroy(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) * We are already at the end of life and kvm->lock is not taken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) * This is ok as the file descriptor is closed by now and nobody
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) * can mess with the pv state. To avoid lockdep_assert_held from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) * complaining, we do not use kvm_s390_pv_is_protected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) if (kvm_s390_pv_get_handle(kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) debug_unregister(kvm->arch.dbf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) free_page((unsigned long)kvm->arch.sie_page2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) if (!kvm_is_ucontrol(kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) gmap_remove(kvm->arch.gmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) kvm_s390_destroy_adapters(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) kvm_s390_clear_float_irqs(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) kvm_s390_vsie_destroy(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) /* Section: vcpu related */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) vcpu->arch.gmap = gmap_create(current->mm, -1UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) if (!vcpu->arch.gmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) vcpu->arch.gmap->private = vcpu->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824)
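/* Remove the VCPU from the SCA: clear its bit in the MCN mask and its SDA entry */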
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) static void sca_del_vcpu(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) if (!kvm_s390_use_sca_entries())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) read_lock(&vcpu->kvm->arch.sca_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) if (vcpu->kvm->arch.use_esca) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) struct esca_block *sca = vcpu->kvm->arch.sca;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) sca->cpu[vcpu->vcpu_id].sda = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) struct bsca_block *sca = vcpu->kvm->arch.sca;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) sca->cpu[vcpu->vcpu_id].sda = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) read_unlock(&vcpu->kvm->arch.sca_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843)
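/*
 * Hook the VCPU into the SCA and point its SIE control block at the
 * SCA origin. If SCA entries are not used, only the ipte control part
 * of the basic SCA is needed.
 */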
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) static void sca_add_vcpu(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) if (!kvm_s390_use_sca_entries()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) struct bsca_block *sca = vcpu->kvm->arch.sca;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) /* we still need the basic sca for the ipte control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) read_lock(&vcpu->kvm->arch.sca_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) if (vcpu->kvm->arch.use_esca) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) struct esca_block *sca = vcpu->kvm->arch.sca;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) struct bsca_block *sca = vcpu->kvm->arch.sca;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) read_unlock(&vcpu->kvm->arch.sca_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) /* Basic SCA to Extended SCA data copy routines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) d->sda = s->sda;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) d->sigp_ctrl.c = s->sigp_ctrl.c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) d->sigp_ctrl.scn = s->sigp_ctrl.scn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) d->ipte_control = s->ipte_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) d->mcn[0] = s->mcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) sca_copy_entry(&d->cpu[i], &s->cpu[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
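/*
 * Replace the basic SCA with an extended SCA so that more VCPU ids fit.
 * All VCPUs are blocked and the sca_lock is held for writing while the
 * entries are copied and every SIE control block is rewired.
 */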
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) static int sca_switch_to_extended(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) struct bsca_block *old_sca = kvm->arch.sca;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) struct esca_block *new_sca;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) unsigned int vcpu_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) u32 scaol, scaoh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) if (kvm->arch.use_esca)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) if (!new_sca)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) scaoh = (u32)((u64)(new_sca) >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) scaol = (u32)(u64)(new_sca) & ~0x3fU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) kvm_s390_vcpu_block_all(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) write_lock(&kvm->arch.sca_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) sca_copy_b_to_e(new_sca, old_sca);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) vcpu->arch.sie_block->scaoh = scaoh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) vcpu->arch.sie_block->scaol = scaol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) kvm->arch.sca = new_sca;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) kvm->arch.use_esca = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) write_unlock(&kvm->arch.sca_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) kvm_s390_vcpu_unblock_all(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) free_page((unsigned long)old_sca);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) old_sca, kvm->arch.sca);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)
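/*
 * Check whether a VCPU id fits into the SCA, switching from the basic
 * to the extended SCA first if necessary and supported.
 */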
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) if (!kvm_s390_use_sca_entries())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) return id < KVM_MAX_VCPUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) if (id < KVM_S390_BSCA_CPU_SLOTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) if (!sclp.has_esca || !sclp.has_64bscao)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) mutex_lock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) /* must be called with preemption disabled to protect from TOD sync and vcpu_load/put */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) vcpu->arch.cputm_start = get_tod_clock_fast();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) /* must be called with preemption disabled to protect from TOD sync and vcpu_load/put */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) vcpu->arch.cputm_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) /* must be called with preemption disabled to protect from TOD sync and vcpu_load/put */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) WARN_ON_ONCE(vcpu->arch.cputm_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) vcpu->arch.cputm_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) __start_cpu_timer_accounting(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) /* must be called with preemption disabled to protect from TOD sync and vcpu_load/put */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) __stop_cpu_timer_accounting(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) vcpu->arch.cputm_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) preempt_disable(); /* protect from TOD sync and vcpu_load/put */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) __enable_cpu_timer_accounting(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) preempt_disable(); /* protect from TOD sync and vcpu_load/put */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) __disable_cpu_timer_accounting(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) /* set the cpu timer - may only be called from the VCPU thread itself */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) preempt_disable(); /* protect from TOD sync and vcpu_load/put */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) if (vcpu->arch.cputm_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) vcpu->arch.cputm_start = get_tod_clock_fast();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) vcpu->arch.sie_block->cputm = cputm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) /* update and get the cpu timer - can also be called from other VCPU threads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) unsigned int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) __u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) if (unlikely(!vcpu->arch.cputm_enabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) return vcpu->arch.sie_block->cputm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) preempt_disable(); /* protect from TOD sync and vcpu_load/put */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) * If the writer ever executed a read in the critical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) * section, e.g. in irq context, we would have a deadlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) value = vcpu->arch.sie_block->cputm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) /* if cputm_start is 0, accounting is being started/stopped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) if (likely(vcpu->arch.cputm_start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) return value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) gmap_enable(vcpu->arch.enabled_gmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) __start_cpu_timer_accounting(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) vcpu->cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) vcpu->cpu = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) __stop_cpu_timer_accounting(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) vcpu->arch.enabled_gmap = gmap_get_enabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) gmap_disable(vcpu->arch.enabled_gmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061)
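/*
 * Finish setup that depends on VM-wide state: inherit the guest TOD
 * epoch, hook the VCPU into the VM's gmap and SCA, and enable
 * interception of operation exceptions when STHYI is emulated
 * (facility 74) or user space handles instruction 0 (user_instr0).
 */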
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) mutex_lock(&vcpu->kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) mutex_unlock(&vcpu->kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) if (!kvm_is_ucontrol(vcpu->kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) vcpu->arch.gmap = vcpu->kvm->arch.gmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) sca_add_vcpu(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) /* make vcpu_load load the right gmap on the first trigger */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) vcpu->arch.enabled_gmap = vcpu->arch.gmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079)
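/*
 * A PCKMO subfunction is only offered to the guest if it is present in
 * both the guest CPU model and the host's subfunction list.
 */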
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) static bool kvm_has_pckmo_ecc(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) /* At least one ECC subfunction must be present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) return kvm_has_pckmo_subfunc(kvm, 32) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) kvm_has_pckmo_subfunc(kvm, 33) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) kvm_has_pckmo_subfunc(kvm, 34) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) kvm_has_pckmo_subfunc(kvm, 40) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) kvm_has_pckmo_subfunc(kvm, 41);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) }
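
/*
 * Note on the bit numbering used above: the PCKMO query mask is in
 * MSB-0 order, hence test_bit_inv(). Worked example: function code 33
 * lives in byte 33 / 8 = 4 of the mask, at bit 33 % 8 = 1 counted from
 * the most significant bit of that byte. Codes 32-34, 40 and 41 are
 * assumed here to be the ECC key-wrapping functions of PCKMO; the
 * authoritative mapping is the architecture's query-mask definition.
 */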
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) * If the AP instructions are not being interpreted and the MSAX3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) * facility is not configured for the guest, there is nothing to set up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) vcpu->arch.sie_block->eca &= ~ECA_APIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) vcpu->arch.sie_block->ecd &= ~ECD_ECC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) if (vcpu->kvm->arch.crypto.apie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) vcpu->arch.sie_block->eca |= ECA_APIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) /* Set up protected key support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) if (vcpu->kvm->arch.crypto.aes_kw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) vcpu->arch.sie_block->ecb3 |= ECB3_AES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 		/* ECC keys are also wrapped with the AES wrapping key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) if (kvm_has_pckmo_ecc(vcpu->kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) vcpu->arch.sie_block->ecd |= ECD_ECC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) if (vcpu->kvm->arch.crypto.dea_kw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) }
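
/*
 * A minimal userspace sketch (assumptions: vm_fd comes from
 * KVM_CREATE_VM; error handling elided) of how the aes_kw control
 * consumed above gets enabled. The ECB3_AES bit is then mirrored into
 * each SIE block the next time kvm_s390_vcpu_crypto_setup() runs:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CRYPTO,
 *		.attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */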
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) free_page(vcpu->arch.sie_block->cbrlo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) vcpu->arch.sie_block->cbrlo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) if (!vcpu->arch.sie_block->cbrlo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) vcpu->arch.sie_block->ibc = model->ibc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) if (test_kvm_facility(vcpu->kvm, 7))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) u16 uvrc, uvrrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) CPUSTAT_SM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) CPUSTAT_STOPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) if (test_kvm_facility(vcpu->kvm, 78))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) else if (test_kvm_facility(vcpu->kvm, 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) kvm_s390_vcpu_setup_model(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) if (MACHINE_HAS_ESOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) if (test_kvm_facility(vcpu->kvm, 9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) vcpu->arch.sie_block->ecb |= ECB_SRSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) if (test_kvm_facility(vcpu->kvm, 73))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) vcpu->arch.sie_block->ecb |= ECB_TE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) if (test_kvm_facility(vcpu->kvm, 130))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) if (sclp.has_cei)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) vcpu->arch.sie_block->eca |= ECA_CEI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) if (sclp.has_ib)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) vcpu->arch.sie_block->eca |= ECA_IB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) if (sclp.has_siif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) vcpu->arch.sie_block->eca |= ECA_SII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) if (sclp.has_sigpif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) vcpu->arch.sie_block->eca |= ECA_SIGPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) if (test_kvm_facility(vcpu->kvm, 129)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) vcpu->arch.sie_block->eca |= ECA_VX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) if (test_kvm_facility(vcpu->kvm, 139))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) vcpu->arch.sie_block->ecd |= ECD_MEF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) if (test_kvm_facility(vcpu->kvm, 156))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) if (vcpu->arch.sie_block->gd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) vcpu->arch.sie_block->eca |= ECA_AIV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) | SDNXC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) if (sclp.has_kss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) if (vcpu->kvm->arch.use_cmma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) rc = kvm_s390_vcpu_setup_cmma(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) vcpu->arch.sie_block->hpid = HPID_KVM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) kvm_s390_vcpu_crypto_setup(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) mutex_lock(&vcpu->kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) if (kvm_s390_pv_is_protected(vcpu->kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) kvm_s390_vcpu_unsetup_cmma(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) mutex_unlock(&vcpu->kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) struct sie_page *sie_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) if (!sie_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) vcpu->arch.sie_block = &sie_page->sie_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) /* the real guest size will always be smaller than msl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) vcpu->arch.sie_block->mso = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) vcpu->arch.sie_block->msl = sclp.hamax;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) spin_lock_init(&vcpu->arch.local_int.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) vcpu->arch.sie_block->gd |= GISA_FORMAT1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) seqcount_init(&vcpu->arch.cputm_seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) kvm_clear_async_pf_completion_queue(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) KVM_SYNC_GPRS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) KVM_SYNC_ACRS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) KVM_SYNC_CRS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) KVM_SYNC_ARCH0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) KVM_SYNC_PFAULT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) KVM_SYNC_DIAG318;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) kvm_s390_set_prefix(vcpu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) if (test_kvm_facility(vcpu->kvm, 64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) if (test_kvm_facility(vcpu->kvm, 82))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) if (test_kvm_facility(vcpu->kvm, 133))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) if (test_kvm_facility(vcpu->kvm, 156))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) /* fprs can be synchronized via vrs, even if the guest has no vx. With
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) if (MACHINE_HAS_VX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) if (kvm_is_ucontrol(vcpu->kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) rc = __kvm_ucontrol_vcpu_init(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) goto out_free_sie_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) rc = kvm_s390_vcpu_setup(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) goto out_ucontrol_uninit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) out_ucontrol_uninit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) if (kvm_is_ucontrol(vcpu->kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) gmap_remove(vcpu->arch.gmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) out_free_sie_block:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) free_page((unsigned long)(vcpu->arch.sie_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) }
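
/*
 * For context, a hedged userspace view of the creation path above
 * (standard KVM ioctl flow; error handling elided):
 *
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);	    // ends up here
 *	long sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0); // sync regs live here
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 */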
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) return kvm_s390_vcpu_has_irq(vcpu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) exit_sie(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) exit_sie(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) return atomic_read(&vcpu->arch.sie_block->prog20) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) (PROG_BLOCK_SIE | PROG_REQUEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) * If the CPU is not running (e.g. waiting as idle) the function will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355)  * return immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) void exit_sie(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) kvm_s390_vsie_kick(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) }
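
/*
 * The prog20 protocol implemented above, in short: PROG_BLOCK_SIE and
 * PROG_REQUEST plus exit_sie() guarantee the vcpu is outside of SIE
 * before the caller proceeds; the request flag is dropped again via
 * kvm_s390_vcpu_request_handled() once it has been processed. A typical
 * caller pairs the helpers like this:
 *
 *	kvm_s390_vcpu_block(vcpu);	// set PROG_BLOCK_SIE, kick SIE
 *	... modify state SIE must not observe half-done ...
 *	kvm_s390_vcpu_unblock(vcpu);	// clear PROG_BLOCK_SIE
 */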
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) /* Kick a guest cpu out of SIE to process a request synchronously */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) kvm_make_request(req, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) kvm_s390_vcpu_request(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) struct kvm *kvm = gmap->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) unsigned long prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) if (gmap_is_shadow(gmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) if (start >= 1UL << 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) /* We are only interested in prefix pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) kvm_for_each_vcpu(i, vcpu, kvm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) /* match against both prefix pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) prefix = kvm_s390_get_prefix(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) }
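
/*
 * Worked example for the interval check above: a prefix of 0x8000
 * covers the two prefix pages [0x8000, 0xa000). A notifier range of
 * start = 0x9000, end = 0x9fff matches, since 0x8000 <= 0x9fff and
 * 0x9000 <= 0x8000 + 2 * PAGE_SIZE - 1 = 0x9fff, and thus triggers
 * KVM_REQ_MMU_RELOAD for that vcpu.
 */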
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) /* do not poll with more than halt_poll_max_steal percent of steal time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) halt_poll_max_steal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) vcpu->stat.halt_no_poll_steal++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) }
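
/*
 * Unit check for the expression above (informational): avg_steal_timer
 * is kept in CPU-timer units of 2^-12 microseconds, so TICK_USEC << 12
 * is one tick interval in the same units and the quotient is the steal
 * time as a percentage of a tick. With TICK_USEC = 10000 and the
 * default halt_poll_max_steal = 10, polling is skipped once the average
 * steal time reaches 1 ms per 10 ms tick.
 */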
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) /* kvm common code refers to this, but never calls it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) struct kvm_one_reg *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) int r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) switch (reg->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) case KVM_REG_S390_TODPR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) r = put_user(vcpu->arch.sie_block->todpr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) (u32 __user *)reg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) case KVM_REG_S390_EPOCHDIFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) r = put_user(vcpu->arch.sie_block->epoch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) (u64 __user *)reg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) case KVM_REG_S390_CPU_TIMER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) r = put_user(kvm_s390_get_cpu_timer(vcpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) (u64 __user *)reg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) case KVM_REG_S390_CLOCK_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) r = put_user(vcpu->arch.sie_block->ckc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) (u64 __user *)reg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) case KVM_REG_S390_PFTOKEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) r = put_user(vcpu->arch.pfault_token,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) (u64 __user *)reg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) case KVM_REG_S390_PFCOMPARE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) r = put_user(vcpu->arch.pfault_compare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) (u64 __user *)reg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) case KVM_REG_S390_PFSELECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) r = put_user(vcpu->arch.pfault_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) (u64 __user *)reg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) case KVM_REG_S390_PP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) r = put_user(vcpu->arch.sie_block->pp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) (u64 __user *)reg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) case KVM_REG_S390_GBEA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) r = put_user(vcpu->arch.sie_block->gbea,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) (u64 __user *)reg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) }
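
/*
 * A hedged userspace sketch of reading one of the registers handled
 * above (standard KVM_GET_ONE_REG flow; error handling elided):
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *
 * The write direction uses KVM_SET_ONE_REG with the same structure and
 * lands in kvm_arch_vcpu_ioctl_set_one_reg() below.
 */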
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) struct kvm_one_reg *reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) int r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) __u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) switch (reg->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) case KVM_REG_S390_TODPR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) r = get_user(vcpu->arch.sie_block->todpr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) (u32 __user *)reg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) case KVM_REG_S390_EPOCHDIFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) r = get_user(vcpu->arch.sie_block->epoch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) (u64 __user *)reg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) case KVM_REG_S390_CPU_TIMER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) r = get_user(val, (u64 __user *)reg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) if (!r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) kvm_s390_set_cpu_timer(vcpu, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) case KVM_REG_S390_CLOCK_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) r = get_user(vcpu->arch.sie_block->ckc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) (u64 __user *)reg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) case KVM_REG_S390_PFTOKEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) r = get_user(vcpu->arch.pfault_token,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) (u64 __user *)reg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) kvm_clear_async_pf_completion_queue(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) case KVM_REG_S390_PFCOMPARE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) r = get_user(vcpu->arch.pfault_compare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) (u64 __user *)reg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) case KVM_REG_S390_PFSELECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) r = get_user(vcpu->arch.pfault_select,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) (u64 __user *)reg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) case KVM_REG_S390_PP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) r = get_user(vcpu->arch.sie_block->pp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) (u64 __user *)reg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) case KVM_REG_S390_GBEA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) r = get_user(vcpu->arch.sie_block->gbea,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) (u64 __user *)reg->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) kvm_clear_async_pf_completion_queue(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) kvm_s390_vcpu_stop(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) kvm_s390_clear_local_irqs(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) /* Initial reset is a superset of the normal reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) kvm_arch_vcpu_ioctl_normal_reset(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 	 * This equals initial CPU reset in the PoP (Principles of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 	 * Operation), but we don't switch to ESA. We not only reset the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 	 * internal data, but also ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) vcpu->arch.sie_block->gpsw.mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) vcpu->arch.sie_block->gpsw.addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) kvm_s390_set_prefix(vcpu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) kvm_s390_set_cpu_timer(vcpu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) vcpu->arch.sie_block->ckc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) /* ... the data in sync regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) vcpu->run->s.regs.ckc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) vcpu->run->psw_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) vcpu->run->psw_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) vcpu->run->s.regs.todpr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) vcpu->run->s.regs.cputm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) vcpu->run->s.regs.ckc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) vcpu->run->s.regs.pp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) vcpu->run->s.regs.gbea = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) vcpu->run->s.regs.fpc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) * Do not reset these registers in the protected case, as some of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 	 * them are overlaid and they are not accessible in this case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) * anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) vcpu->arch.sie_block->gbea = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) vcpu->arch.sie_block->pp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) vcpu->arch.sie_block->todpr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) struct kvm_sync_regs *regs = &vcpu->run->s.regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) /* Clear reset is a superset of the initial reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) kvm_arch_vcpu_ioctl_initial_reset(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 	memset(&regs->gprs, 0, sizeof(regs->gprs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 	memset(&regs->vrs, 0, sizeof(regs->vrs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 	memset(&regs->acrs, 0, sizeof(regs->acrs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 	memset(&regs->gscb, 0, sizeof(regs->gscb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) regs->etoken = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) regs->etoken_extension = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) }
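
/*
 * The three reset flavours above nest (normal < initial < clear),
 * mirroring the architecture's CPU reset hierarchy. Userspace picks one
 * via the vcpu ioctls (sketch; error handling elided):
 *
 *	ioctl(vcpu_fd, KVM_S390_NORMAL_RESET, 0);
 *	ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);
 *	ioctl(vcpu_fd, KVM_S390_CLEAR_RESET, 0);
 */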
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) struct kvm_sregs *sregs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) struct kvm_sregs *sregs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) if (test_fp_ctl(fpu->fpc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) vcpu->run->s.regs.fpc = fpu->fpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) if (MACHINE_HAS_VX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) (freg_t *) fpu->fprs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) }
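
/*
 * Background for the conversion above: architecturally, fpr N is the
 * leftmost 64 bits of vector register N, so converting between the two
 * layouts is just a copy of the high halves, roughly:
 *
 *	for (i = 0; i < 16; i++)
 *		vrs[i].high = fprs[i];	// low halves stay untouched
 *
 * (Sketch only; the real helpers are convert_fp_to_vx() and
 * convert_vx_to_fp() from the fpu code.)
 */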
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) /* make sure we have the latest values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) save_fpu_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) if (MACHINE_HAS_VX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) convert_vx_to_fp((freg_t *) fpu->fprs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) (__vector128 *) vcpu->run->s.regs.vrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) fpu->fpc = vcpu->run->s.regs.fpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) if (!is_vcpu_stopped(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) vcpu->run->psw_mask = psw.mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) vcpu->run->psw_addr = psw.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) struct kvm_translation *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) return -EINVAL; /* not implemented yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) KVM_GUESTDBG_USE_HW_BP | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) KVM_GUESTDBG_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) struct kvm_guest_debug *dbg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) vcpu->guest_debug = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) kvm_s390_clear_bp_data(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) if (!sclp.has_gpere) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) if (dbg->control & KVM_GUESTDBG_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) vcpu->guest_debug = dbg->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) /* enforce guest PER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) rc = kvm_s390_import_bp_data(vcpu, dbg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) vcpu->arch.guestdbg.last_bp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) vcpu->guest_debug = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) kvm_s390_clear_bp_data(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) }
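
/*
 * Sketch of how a debugger arms the path above (assumes the s390 layout
 * of struct kvm_hw_breakpoint from <asm/kvm.h>; bp_address is a
 * hypothetical target; error handling elided):
 *
 *	struct kvm_hw_breakpoint bp = {
 *		.addr = bp_address,
 *		.type = KVM_HW_BP,
 *		.len = 1,
 *	};
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP,
 *		.arch = { .nr_hw_bp = 1, .hw_bp = &bp },
 *	};
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */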
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) struct kvm_mp_state *mp_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) /* CHECK_STOP and LOAD are not supported yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) KVM_MP_STATE_OPERATING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) struct kvm_mp_state *mp_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) /* user space knows about this interface - let it control the state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) vcpu->kvm->arch.user_cpu_state_ctrl = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) switch (mp_state->mp_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) case KVM_MP_STATE_STOPPED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) rc = kvm_s390_vcpu_stop(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) case KVM_MP_STATE_OPERATING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) rc = kvm_s390_vcpu_start(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) case KVM_MP_STATE_LOAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) rc = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) case KVM_MP_STATE_CHECK_STOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) fallthrough; /* CHECK_STOP and LOAD are not supported yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) rc = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) static bool ibs_enabled(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) kvm_s390_vcpu_request_handled(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) if (!kvm_request_pending(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) * We use MMU_RELOAD just to re-arm the ipte notifier for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) * This ensures that the ipte instruction for this request has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) * already finished. We might race against a second unmapper that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 	 * wants to set the blocking bit. Let's just retry the request loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) rc = gmap_mprotect_notify(vcpu->arch.gmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) kvm_s390_get_prefix(vcpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) PAGE_SIZE * 2, PROT_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) vcpu->arch.sie_block->ihcpu = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) if (!ibs_enabled(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) if (ibs_enabled(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) * Disable CMM virtualization; we will emulate the ESSA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 		 * instruction manually, in order to provide the additional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) 		 * functionality needed for live migration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) * Re-enable CMM virtualization if CMMA is available and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) * CMM has been used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) if ((vcpu->kvm->arch.use_cmma) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) (vcpu->kvm->mm->context.uses_cmm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) /* nothing to do, just clear the request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) kvm_clear_request(KVM_REQ_UNHALT, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) /* we left the vsie handler, nothing to do, just clear the request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) void kvm_s390_set_tod_clock(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) const struct kvm_s390_vm_tod_clock *gtod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) struct kvm_s390_tod_clock_ext htod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) mutex_lock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) get_tod_clock_ext((char *)&htod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) kvm->arch.epoch = gtod->tod - htod.tod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) kvm->arch.epdx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) if (test_kvm_facility(kvm, 139)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) if (kvm->arch.epoch > gtod->tod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) kvm->arch.epdx -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) kvm_s390_vcpu_block_all(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) kvm_for_each_vcpu(i, vcpu, kvm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) vcpu->arch.sie_block->epoch = kvm->arch.epoch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) vcpu->arch.sie_block->epdx = kvm->arch.epdx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) kvm_s390_vcpu_unblock_all(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) * kvm_arch_fault_in_page - fault-in guest page if necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) * @vcpu: The corresponding virtual cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) * @gpa: Guest physical address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) * @writable: Whether the page should be writable or not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) * Make sure that a guest page has been faulted-in on the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) * Return: Zero on success, negative error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) return gmap_fault(vcpu->arch.gmap, gpa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) writable ? FAULT_FLAG_WRITE : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911)
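/*
 * Inject one side of the pfault handshake: a PFAULT_INIT external
 * interrupt on this VCPU when the page is not yet available, or a
 * PFAULT_DONE floating interrupt on the VM once it is. The token
 * allows the guest to match the two notifications.
 */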
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) unsigned long token)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) struct kvm_s390_interrupt inti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) struct kvm_s390_irq irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) if (start_token) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) irq.u.ext.ext_params2 = token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) irq.type = KVM_S390_INT_PFAULT_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) inti.type = KVM_S390_INT_PFAULT_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) inti.parm64 = token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) struct kvm_async_pf *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) struct kvm_async_pf *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) struct kvm_async_pf *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) /* s390 will always inject the page directly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) * s390 will always inject the page directly,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) * but we still want check_async_completion to cleanup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959)
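/*
 * Try to arm an async pfault for the current host fault. This is only
 * possible if the guest enabled the pfault handshake and can take the
 * PFAULT_INIT interrupt right now: the PSW has to match the pfault
 * select/compare masks, external interrupts and the service-signal
 * subclass must be open, and no other interrupt may be pending.
 */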
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) hva_t hva;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) struct kvm_arch_async_pf arch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) vcpu->arch.pfault_compare)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) if (psw_extint_disabled(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) if (kvm_s390_vcpu_has_irq(vcpu, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) if (!vcpu->arch.gmap->pfault_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) hva += current->thread.gmap_addr & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986)
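/*
 * Prepare for a SIE entry: clean up completed async pfaults, deliver
 * pending interrupts and handle pending VCPU requests. Returns != 0
 * if the SIE entry has to be aborted.
 */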
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) static int vcpu_pre_run(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) int rc, cpuflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) * On s390 notifications for arriving pages will be delivered directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) * to the guest but the house keeping for completed pfaults is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) * handled outside the worker.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) kvm_check_async_pf_completion(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) if (need_resched())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) if (!kvm_is_ucontrol(vcpu->kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) rc = kvm_s390_deliver_pending_interrupts(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) rc = kvm_s390_handle_requests(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) if (guestdbg_enabled(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) kvm_s390_backup_guest_per_regs(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) kvm_s390_patch_guest_per_regs(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) vcpu->arch.sie_block->icptcode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) trace_kvm_s390_sie_enter(vcpu, cpuflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028)
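/*
 * A guest access faulted in SIE and cannot be resolved: inject an
 * addressing exception instead.
 */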
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) struct kvm_s390_pgm_info pgm_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) .code = PGM_ADDRESSING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) u8 opcode, ilen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) trace_kvm_s390_sie_fault(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) * We want to inject an addressing exception, which is defined as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) * suppressing or terminating exception. However, since we came here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) * by a DAT access exception, the PSW still points to the faulting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) * instruction since DAT exceptions are nullifying. So we've got
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) * to look up the current opcode to get the length of the instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) * to be able to forward the PSW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) ilen = insn_length(opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) } else if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) /* Instruction-Fetching Exceptions - we can't detect the ilen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) * Forward by arbitrary ilc, injection will take care of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) * nullification if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) pgm_info = vcpu->arch.pgm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) ilen = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) kvm_s390_forward_psw(vcpu, ilen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064)
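/*
 * Post-process a SIE exit: forwarded machine checks are reinjected,
 * intercepts are handled, and the return value decides whether to
 * re-enter SIE (0), go out to userspace (-EREMOTE) or report an error.
 */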
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) struct mcck_volatile_info *mcck_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) struct sie_page *sie_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) vcpu->arch.sie_block->icptcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) if (guestdbg_enabled(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) kvm_s390_restore_guest_per_regs(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) if (exit_reason == -EINTR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) VCPU_EVENT(vcpu, 3, "%s", "machine check");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) sie_page = container_of(vcpu->arch.sie_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) struct sie_page, sie_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) mcck_info = &sie_page->mcck_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) kvm_s390_reinject_machine_check(vcpu, mcck_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) if (vcpu->arch.sie_block->icptcode > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) int rc = kvm_handle_sie_intercept(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) if (rc != -EOPNOTSUPP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) return -EREMOTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) } else if (exit_reason != -EFAULT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) vcpu->stat.exit_null++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) } else if (kvm_is_ucontrol(vcpu->kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) vcpu->run->s390_ucontrol.trans_exc_code =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) current->thread.gmap_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) vcpu->run->s390_ucontrol.pgm_code = 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) return -EREMOTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) } else if (current->thread.gmap_pfault) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) trace_kvm_s390_major_guest_pfault(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) current->thread.gmap_pfault = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) if (kvm_arch_setup_async_pf(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) return vcpu_post_run_fault_in_sie(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117)
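/* The external, I/O and machine-check interruption mask bits of a PSW. */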
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) #define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) static int __vcpu_run(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) int rc, exit_reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) * We try to hold kvm->srcu during most of vcpu_run (except when run-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) * ning the guest), so that memslots (and other stuff) are protected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) rc = vcpu_pre_run(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) * As PF_VCPU will be used in fault handler, between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) * guest_enter and guest_exit should be no uaccess.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) guest_enter_irqoff();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) __disable_cpu_timer_accounting(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) if (kvm_s390_pv_cpu_is_protected(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) memcpy(sie_page->pv_grregs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) vcpu->run->s.regs.gprs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) sizeof(sie_page->pv_grregs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) exit_reason = sie64a(vcpu->arch.sie_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) vcpu->run->s.regs.gprs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) if (kvm_s390_pv_cpu_is_protected(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) memcpy(vcpu->run->s.regs.gprs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) sie_page->pv_grregs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) sizeof(sie_page->pv_grregs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) * We're not allowed to inject interrupts on intercepts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) * that leave the guest state in an "in-between" state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) * where the next SIE entry will do a continuation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) * Fence interrupts in our "internal" PSW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) __enable_cpu_timer_accounting(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) guest_exit_irqoff();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) rc = vcpu_post_run(vcpu, exit_reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178)
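/*
 * Load the format-2 (non-protected) state that userspace marked dirty
 * from kvm_run into the SIE block. Facilities that are normally
 * enabled lazily (runtime instrumentation, guarded storage) are
 * enabled eagerly here when their control blocks look valid.
 */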
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) struct kvm_run *kvm_run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) struct runtime_instr_cb *riccb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) struct gs_cb *gscb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) vcpu->arch.pfault_token = kvm_run->s.regs.pft;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) kvm_clear_async_pf_completion_queue(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) * If userspace sets the riccb (e.g. after migration) to a valid state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) * we should enable RI here instead of doing the lazy enablement.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) test_kvm_facility(vcpu->kvm, 64) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) riccb->v &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) vcpu->arch.sie_block->ecb3 |= ECB3_RI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) * If userspace sets the gscb (e.g. after migration) to non-zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) * we should enable GS here instead of doing the lazy enablement.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) test_kvm_facility(vcpu->kvm, 133) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) gscb->gssm &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) !vcpu->arch.gs_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) vcpu->arch.sie_block->ecb |= ECB_GS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) vcpu->arch.gs_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) test_kvm_facility(vcpu->kvm, 82)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) if (MACHINE_HAS_GS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) __ctl_set_bit(2, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) if (current->thread.gs_cb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) vcpu->arch.host_gscb = current->thread.gs_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) save_gs_cb(vcpu->arch.host_gscb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) if (vcpu->arch.gs_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) current->thread.gs_cb = (struct gs_cb *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) &vcpu->run->s.regs.gscb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) restore_gs_cb(current->thread.gs_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) /* SIE will load etoken directly from SDNX and therefore kvm_run */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250)
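/*
 * Load the register state that userspace marked dirty in kvm_run into
 * the VCPU and switch from the host (userspace) floating point and
 * access register context to the guest context.
 */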
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) static void sync_regs(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) struct kvm_run *kvm_run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) /* some control register changes require a tlb flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) save_access_regs(vcpu->arch.host_acrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) restore_access_regs(vcpu->run->s.regs.acrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) /* save host (userspace) fprs/vrs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) save_fpu_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) if (MACHINE_HAS_VX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) current->thread.fpu.regs = vcpu->run->s.regs.vrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) current->thread.fpu.regs = vcpu->run->s.regs.fprs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) if (test_fp_ctl(current->thread.fpu.fpc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) /* User space provided an invalid FPC, let's clear it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) current->thread.fpu.fpc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) /* Sync fmt2 only data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) sync_regs_fmt2(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) * In several places we have to modify our internal view to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) * not do things that are disallowed by the ultravisor. For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) * example we must not inject interrupts after specific exits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) * (e.g. 112 prefix page not secure). We do this by turning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) * off the machine check, external and I/O interrupt bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) * of our PSW copy. To avoid getting validity intercepts, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) * do only accept the condition code from userspace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) PSW_MASK_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) kvm_run->kvm_dirty_regs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301)
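/*
 * Counterpart of sync_regs_fmt2(): copy the format-2 state back into
 * kvm_run and restore the host guarded-storage control block.
 */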
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) static void store_regs_fmt2(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) struct kvm_run *kvm_run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) if (MACHINE_HAS_GS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) __ctl_set_bit(2, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) if (vcpu->arch.gs_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) save_gs_cb(current->thread.gs_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) current->thread.gs_cb = vcpu->arch.host_gscb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) restore_gs_cb(vcpu->arch.host_gscb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) if (!vcpu->arch.host_gscb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) __ctl_clear_bit(2, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) vcpu->arch.host_gscb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) /* SIE will save etoken directly into SDNX and therefore kvm_run */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325)
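/*
 * Counterpart of sync_regs(): publish the guest register state to
 * userspace via kvm_run and switch back to the host floating point
 * and access register context.
 */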
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) static void store_regs(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) struct kvm_run *kvm_run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) kvm_run->s.regs.pft = vcpu->arch.pfault_token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) save_access_regs(vcpu->run->s.regs.acrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) restore_access_regs(vcpu->arch.host_acrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) /* Save guest register state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) save_fpu_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) /* Restore will be done lazily at return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) store_regs_fmt2(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350)
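/*
 * The KVM_RUN ioctl: sync the state userspace marked dirty, run the
 * VCPU in a SIE loop until an exit to userspace is required, and hand
 * the resulting state back via kvm_run.
 */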
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) struct kvm_run *kvm_run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) if (kvm_run->immediate_exit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) return -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) vcpu_load(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) if (guestdbg_exit_pending(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) kvm_s390_prepare_debug_exit(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) kvm_sigset_activate(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) * no need to check the return value of vcpu_start as it can only have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) * an error for protvirt, but protvirt means user cpu state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) kvm_s390_vcpu_start(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) } else if (is_vcpu_stopped(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) pr_err_ratelimited("can't run stopped vcpu %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) vcpu->vcpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) sync_regs(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) enable_cpu_timer_accounting(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) might_fault();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) rc = __vcpu_run(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) if (signal_pending(current) && !rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) kvm_run->exit_reason = KVM_EXIT_INTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) rc = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) if (guestdbg_exit_pending(vcpu) && !rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) kvm_s390_prepare_debug_exit(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) if (rc == -EREMOTE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) /* userspace support is needed, kvm_run has been prepared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) disable_cpu_timer_accounting(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) store_regs(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) kvm_sigset_deactivate(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) vcpu->stat.exit_userspace++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) * store status at address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) * we use have two special cases:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) unsigned char archmode = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) freg_t fprs[NUM_FPRS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) unsigned int px;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) u64 clkcomp, cputm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) px = kvm_s390_get_prefix(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) if (write_guest_abs(vcpu, 163, &archmode, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) gpa = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) if (write_guest_real(vcpu, 163, &archmode, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) gpa = px;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) gpa -= __LC_FPREGS_SAVE_AREA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) /* manually convert vector registers if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) if (MACHINE_HAS_VX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) fprs, 128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) vcpu->run->s.regs.fprs, 128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) vcpu->run->s.regs.gprs, 128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) &vcpu->arch.sie_block->gpsw, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) &px, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) &vcpu->run->s.regs.fpc, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) &vcpu->arch.sie_block->todpr, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) cputm = kvm_s390_get_cpu_timer(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) &cputm, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) clkcomp = vcpu->arch.sie_block->ckc >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) &clkcomp, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) &vcpu->run->s.regs.acrs, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) &vcpu->arch.sie_block->gcr, 128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) return rc ? -EFAULT : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) * switch in the run ioctl. Let's update our copies before we save
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) * it into the save area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) save_fpu_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) save_access_regs(vcpu->run->s.regs.acrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) return kvm_s390_store_status_unloaded(vcpu, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489)
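/*
 * IBS speeds up a VCPU while it is the only one running. Activation
 * and deactivation are posted as sync requests so they take effect on
 * the target VCPU before its next SIE entry; a pending request for the
 * opposite state is cancelled first.
 */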
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) kvm_for_each_vcpu(i, vcpu, kvm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) __disable_ibs_on_vcpu(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) if (!sclp.has_ibs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513)
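/*
 * Move a stopped VCPU into the operating state. For protected guests
 * the ultravisor is informed first. Only one VCPU may enter or leave
 * the STOPPED state at a time.
 */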
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) int i, online_vcpus, r = 0, started_vcpus = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) if (!is_vcpu_stopped(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) /* Only one cpu at a time may enter/leave the STOPPED state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) spin_lock(&vcpu->kvm->arch.start_stop_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) /* Let's tell the UV that we want to change into the operating state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) if (kvm_s390_pv_cpu_is_protected(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) spin_unlock(&vcpu->kvm->arch.start_stop_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) for (i = 0; i < online_vcpus; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) started_vcpus++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) if (started_vcpus == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) /* we're the only active VCPU -> speed it up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) __enable_ibs_on_vcpu(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) } else if (started_vcpus == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) * As we are starting a second VCPU, we have to disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) * the IBS facility on all VCPUs to remove potentially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) * oustanding ENABLE requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) __disable_ibs_on_all_vcpus(vcpu->kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) * The real PSW might have changed due to a RESTART interpreted by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) * ultravisor. We block all interrupts and let the next sie exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) * refresh our view.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) if (kvm_s390_pv_cpu_is_protected(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) * Another VCPU might have used IBS while we were offline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) * Let's play safe and flush the VCPU at startup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) spin_unlock(&vcpu->kvm->arch.start_stop_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568)
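/*
 * Counterpart of kvm_s390_vcpu_start(): move a VCPU into the stopped
 * state. If exactly one started VCPU remains afterwards, IBS is
 * enabled for it again.
 */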
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) int i, online_vcpus, r = 0, started_vcpus = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) struct kvm_vcpu *started_vcpu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) if (is_vcpu_stopped(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) /* Only one cpu at a time may enter/leave the STOPPED state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) spin_lock(&vcpu->kvm->arch.start_stop_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) /* Let's tell the UV that we want to change into the stopped state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) if (kvm_s390_pv_cpu_is_protected(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) spin_unlock(&vcpu->kvm->arch.start_stop_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) * Set the VCPU to STOPPED and THEN clear the interrupt flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) * have been fully processed. This will ensure that the VCPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) kvm_s390_clear_stop_irq(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) __disable_ibs_on_vcpu(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) for (i = 0; i < online_vcpus; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) started_vcpus++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) started_vcpu = vcpu->kvm->vcpus[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) if (started_vcpus == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) * As we only have one VCPU left, we want to enable the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) * IBS facility for that VCPU to speed it up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) __enable_ibs_on_vcpu(started_vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) spin_unlock(&vcpu->kvm->arch.start_stop_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620)
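/* Handle the KVM_ENABLE_CAP ioctl for per-VCPU capabilities. */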
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) struct kvm_enable_cap *cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) if (cap->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) switch (cap->cap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) case KVM_CAP_S390_CSS_SUPPORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) if (!vcpu->kvm->arch.css_support) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) vcpu->kvm->arch.css_support = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) trace_kvm_s390_enable_css(vcpu->kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) }
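
/*
 * Illustrative only, not part of this file's build: a minimal userspace
 * sketch of enabling the capability handled above. vcpu_fd is a hypothetical
 * file descriptor obtained from KVM_CREATE_VCPU; flags must be zero or the
 * handler above returns -EINVAL.
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_S390_CSS_SUPPORT,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		err(1, "KVM_ENABLE_CAP");
 */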

static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
				   struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	int r = 0;

	if (mop->flags || !mop->size)
		return -EINVAL;
	if (mop->size + mop->sida_offset < mop->size)
		return -EINVAL;
	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
		return -E2BIG;
	if (!kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;

	switch (mop->op) {
	case KVM_S390_MEMOP_SIDA_READ:
		if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
						 mop->sida_offset), mop->size))
			r = -EFAULT;
		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
					    mop->sida_offset), uaddr, mop->size))
			r = -EFAULT;
		break;
	}
	return r;
}
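
/*
 * Illustrative only: how a userspace VMM might read from the SIDA of a
 * protected vcpu via KVM_S390_MEM_OP. vcpu_fd, buf and len are hypothetical;
 * the offset and size are validated by the handler above.
 *
 *	struct kvm_s390_mem_op op = {
 *		.op = KVM_S390_MEMOP_SIDA_READ,
 *		.buf = (__u64)(unsigned long)buf,
 *		.size = len,
 *		.sida_offset = 0,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &op) < 0)
 *		err(1, "KVM_S390_MEM_OP");
 */
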
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r = 0;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	}

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
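
/*
 * Illustrative only: probing whether a guest logical address range is
 * accessible, without copying any data, using the check-only flag handled
 * above. guest_addr, len and vcpu_fd are hypothetical.
 *
 *	struct kvm_s390_mem_op op = {
 *		.op = KVM_S390_MEMOP_LOGICAL_READ,
 *		.gaddr = guest_addr,
 *		.size = len,
 *		.ar = 0,
 *		.flags = KVM_S390_MEMOP_F_CHECK_ONLY,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &op) < 0)
 *		err(1, "address range not accessible");
 */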

static long kvm_s390_guest_memsida_op(struct kvm_vcpu *vcpu,
				      struct kvm_s390_mem_op *mop)
{
	int r, srcu_idx;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		r = kvm_s390_guest_mem_op(vcpu, mop);
		break;
	case KVM_S390_MEMOP_SIDA_READ:
	case KVM_S390_MEMOP_SIDA_WRITE:
		/* the vcpu->mutex held by our caller prevents the sida from going away */
		r = kvm_s390_guest_sida_op(vcpu, mop);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	return r;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq = {};

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	}
	return -ENOIOCTLCMD;
}
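
/*
 * Illustrative only: injecting an emergency signal interrupt through the
 * async KVM_S390_IRQ path handled above. The fields come from the uapi
 * struct kvm_s390_irq; vcpu_fd and cpu_addr are hypothetical.
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_INT_EMERGENCY,
 *		.u.emerg.code = cpu_addr,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_IRQ, &irq) < 0)
 *		err(1, "KVM_S390_IRQ");
 */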

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;
	u16 rc, rrc;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_store_status_unloaded(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_CLEAR_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_INITIAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_INITIAL,
					  &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_NORMAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EINVAL;
		if (kvm_s390_pv_cpu_is_protected(vcpu))
			break;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_memsida_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *)irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *)irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if (vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET &&
	    kvm_is_ucontrol(vcpu->kvm)) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
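
/*
 * Illustrative only: for a user-controlled (ucontrol) VM, userspace can map
 * the vcpu's SIE control block through the fault handler above. vcpu_fd and
 * page_size are hypothetical; the mmap offset must correspond to page
 * KVM_S390_SIE_PAGE_OFFSET of the vcpu fd.
 *
 *	void *sie_block = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, vcpu_fd,
 *			       KVM_S390_SIE_PAGE_OFFSET * page_size);
 */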

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks. Memory slots must start and end on a segment
	 * boundary (1 MB). The backing memory in userspace may be fragmented
	 * across several VMAs, and it is fine to mmap() and munmap() ranges
	 * within the slot at any time after this call.
	 */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	/* When we are protected, we should not change the memory slots */
	if (kvm_s390_pv_get_handle(kvm))
		return -EINVAL;
	return 0;
}
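
/*
 * Illustrative only: a slot layout that passes the checks above. vm_fd and
 * backing are hypothetical; both the size and the userspace address are
 * aligned to the 1 MB segment size.
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = 256UL << 20,
 *		.userspace_addr = (__u64)(unsigned long)backing,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
 *		err(1, "KVM_SET_USER_MEMORY_REGION");
 */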

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int rc = 0;

	switch (change) {
	case KVM_MR_DELETE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		break;
	case KVM_MR_MOVE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		if (rc)
			break;
		fallthrough;
	case KVM_MR_CREATE:
		rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
				      mem->guest_phys_addr, mem->memory_size);
		break;
	case KVM_MR_FLAGS_ONLY:
		break;
	default:
		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
	}
	if (rc)
		pr_warn("failed to commit memory region\n");
}

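/*
 * A note on the arithmetic below (the semantics of sclp.hmfai itself are
 * defined by the SCLP interface, not by this file): hmfai is read as sixteen
 * 2-bit fields, one per doubleword of the facility list. For a field value
 * of n, the returned mask keeps the low 48 - 16 * n bits of
 * stfle_fac_list[i], i.e. each increment hides another 16 facility bits
 * from guests.
 */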
static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
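
/*
 * Illustrative only: the nested/hpage exclusion above means the two module
 * parameters cannot both be enabled at load time, e.g. (assuming the usual
 * module name):
 *
 *	# modprobe kvm nested=1 hpage=0
 */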

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c,
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");