^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * handling diagnose instructions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright IBM Corp. 2008, 2020
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Author(s): Carsten Otte <cotte@de.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Christian Borntraeger <borntraeger@de.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/kvm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/kvm_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <asm/gmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <asm/virtio-ccw.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include "kvm-s390.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include "trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include "trace-s390.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include "gaccess.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
/*
 * Handle DIAGNOSE 0x10: release guest memory pages.
 *
 * Rx (bits 4-7 of the instruction's ipa) contains the guest address of
 * the first page of the range, Ry (bits 0-3) the address of the last
 * page; "end" is made exclusive by adding PAGE_SIZE.
 *
 * Returns 0 on success, or the result of injecting a specification
 * exception if the operands are not page aligned, describe an empty
 * range, or touch the first two (low-core) pages.
 */
static int diag_release_pages(struct kvm_vcpu *vcpu)
{
	unsigned long start, end;
	unsigned long prefix = kvm_s390_get_prefix(vcpu);

	start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
	vcpu->stat.diagnose_10++;

	/* both addresses page aligned, range non-empty, above pages 0 and 1 */
	if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
	    || start < 2 * PAGE_SIZE)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);

	/*
	 * We checked for start >= end above, so lets check for the
	 * fast path (no prefix swap page involved)
	 */
	if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
		gmap_discard(vcpu->arch.gmap, start, end);
	} else {
		/*
		 * This is the slow path. gmap_discard will check the
		 * ranges itself, so lets split this into before prefix,
		 * prefix, after prefix and let gmap_discard make some
		 * of these calls NOPs.
		 */
		gmap_discard(vcpu->arch.gmap, start, prefix);
		/*
		 * Due to prefixing, the two pages at the prefix address
		 * are swapped with guest absolute pages 0 and 1, so
		 * discard those absolute pages when the range covers
		 * the corresponding prefix page.
		 */
		if (start <= prefix)
			gmap_discard(vcpu->arch.gmap, 0, PAGE_SIZE);
		if (end > prefix + PAGE_SIZE)
			gmap_discard(vcpu->arch.gmap, PAGE_SIZE, 2 * PAGE_SIZE);
		gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end);
	}
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
/*
 * Handle DIAGNOSE 0x258: page-reference services (async pfault handshake).
 *
 * Rx (bits 4-7 of the ipa) holds the guest address of the parameter
 * block, Ry (bits 0-3) names the register that receives the function
 * return code.  Subcode 0 (TOKEN) establishes the pagefault handshake,
 * subcode 1 (CANCEL) tears it down; the interface and the mandated
 * return codes come from SC24-6084 (see comments below).
 *
 * Returns 0 on success, -EOPNOTSUPP for unknown subcodes, or the result
 * of a program interrupt injection for invalid operands.
 */
static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
{
	/* guest parameter block layout for DIAG 0x258 */
	struct prs_parm {
		u16 code;
		u16 subcode;
		u16 parm_len;
		u16 parm_version;
		u64 token_addr;
		u64 select_mask;
		u64 compare_mask;
		u64 zarch;
	};
	struct prs_parm parm;
	int rc;
	u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
	u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);

	VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
		   vcpu->run->s.regs.gprs[rx]);
	vcpu->stat.diagnose_258++;
	/* the parameter block must be doubleword aligned */
	if (vcpu->run->s.regs.gprs[rx] & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	/* sanity-check the block before looking at the subcode */
	if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (parm.subcode) {
	case 0: /* TOKEN */
		VCPU_EVENT(vcpu, 3, "pageref token addr 0x%llx "
			   "select mask 0x%llx compare mask 0x%llx",
			   parm.token_addr, parm.select_mask, parm.compare_mask);
		if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
			/*
			 * If the pagefault handshake is already activated,
			 * the token must not be changed. We have to return
			 * decimal 8 instead, as mandated in SC24-6084.
			 */
			vcpu->run->s.regs.gprs[ry] = 8;
			return 0;
		}

		/* compare bits must be a subset of the select bits */
		if ((parm.compare_mask & parm.select_mask) != parm.compare_mask ||
		    parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		/* the token address must point to backed guest memory */
		if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		vcpu->arch.pfault_token = parm.token_addr;
		vcpu->arch.pfault_select = parm.select_mask;
		vcpu->arch.pfault_compare = parm.compare_mask;
		vcpu->run->s.regs.gprs[ry] = 0;
		rc = 0;
		break;
	case 1: /*
		 * CANCEL
		 * Specification allows to let already pending tokens survive
		 * the cancel, therefore to reduce code complexity, we assume
		 * all outstanding tokens are already pending.
		 */
		VCPU_EVENT(vcpu, 3, "pageref cancel addr 0x%llx", parm.token_addr);
		/* all operand fields must be zero for CANCEL */
		if (parm.token_addr || parm.select_mask ||
		    parm.compare_mask || parm.zarch)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		vcpu->run->s.regs.gprs[ry] = 0;
		/*
		 * If the pfault handling was not established or is already
		 * canceled SC24-6084 requests to return decimal 4.
		 */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			vcpu->run->s.regs.gprs[ry] = 4;
		else
			vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;

		rc = 0;
		break;
	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) vcpu->stat.diagnose_44++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) kvm_vcpu_on_spin(vcpu, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) struct kvm_vcpu *tcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) int tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) vcpu->stat.diagnose_9c++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) /* yield to self */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) if (tid == vcpu->vcpu_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) goto no_yield;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) /* yield to invalid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) if (!tcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) goto no_yield;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) /* target already running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) if (READ_ONCE(tcpu->cpu) >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) goto no_yield;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) if (kvm_vcpu_yield_to(tcpu) <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) goto no_yield;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: done", tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) no_yield:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) vcpu->stat.diagnose_9c_ignored++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) vcpu->stat.diagnose_308++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) switch (subcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) vcpu->run->s390_reset_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) * no need to check the return value of vcpu_stop as it can only have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) * an error for protvirt, but protvirt means user cpu state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) kvm_s390_vcpu_stop(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) vcpu->run->s390_reset_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) return -EREMOTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
/*
 * Handle DIAGNOSE 0x500: KVM hypercall.
 *
 * Only the virtio-ccw notify function (gpr 1 ==
 * KVM_S390_VIRTIO_CCW_NOTIFY) is handled here, and only when the
 * channel subsystem is handled in the kernel (css_support); everything
 * else is left to userspace by returning -EOPNOTSUPP.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->stat.diagnose_500++;
	/* No virtio-ccw notification? Get out quickly. */
	if (!vcpu->kvm->arch.css_support ||
	    (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
		return -EOPNOTSUPP;

	VCPU_EVENT(vcpu, 4, "diag 0x500 schid 0x%8.8x queue 0x%x cookie 0x%llx",
		   (u32) vcpu->run->s.regs.gprs[2],
		   (u32) vcpu->run->s.regs.gprs[3],
		   vcpu->run->s.regs.gprs[4]);

	/*
	 * The layout is as follows:
	 * - gpr 2 contains the subchannel id (passed as addr)
	 * - gpr 3 contains the virtqueue index (passed as datamatch)
	 * - gpr 4 contains the index on the bus (optionally)
	 */
	ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
				      vcpu->run->s.regs.gprs[2] & 0xffffffff,
				      8, &vcpu->run->s.regs.gprs[3],
				      vcpu->run->s.regs.gprs[4]);

	/*
	 * Return cookie in gpr 2, but don't overwrite the register if the
	 * diagnose will be handled by userspace.
	 */
	if (ret != -EOPNOTSUPP)
		vcpu->run->s.regs.gprs[2] = ret;
	/* kvm_io_bus_write_cookie returns -EOPNOTSUPP if it found no match. */
	return ret < 0 ? ret : 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) trace_kvm_s390_handle_diag(vcpu, code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) switch (code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) case 0x10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) return diag_release_pages(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) case 0x44:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) return __diag_time_slice_end(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) case 0x9c:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) return __diag_time_slice_end_directed(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) case 0x258:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) return __diag_page_ref_service(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) case 0x308:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) return __diag_ipl_functions(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) case 0x500:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) return __diag_virtio_hypercall(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) vcpu->stat.diagnose_other++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) }