Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * in-kernel handling for sie intercepts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright IBM Corp. 2008, 2020
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *    Author(s): Carsten Otte <cotte@de.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *               Christian Borntraeger <borntraeger@de.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/kvm_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <asm/asm-offsets.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <asm/sysinfo.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <asm/uv.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include "kvm-s390.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include "gaccess.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include "trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include "trace-s390.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 	u8 ilen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 	switch (vcpu->arch.sie_block->icptcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 	case ICPT_INST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	case ICPT_INSTPROGI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 	case ICPT_OPEREXC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 	case ICPT_PARTEXEC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	case ICPT_IOINST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 		/* instruction only stored for these icptcodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 		ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 		/* Use the length of the EXECUTE instruction if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 		if (sie_block->icptstatus & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 			ilen = (sie_block->icptstatus >> 4) & 0x6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 			if (!ilen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 				ilen = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 	case ICPT_PROGI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 		/* bit 1+2 of pgmilc are the ilc, so we directly get ilen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 		ilen = vcpu->arch.sie_block->pgmilc & 0x6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	return ilen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) static int handle_stop(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	uint8_t flags, stop_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	vcpu->stat.exit_stop_request++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	/* delay the stop if any non-stop irq is pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	if (kvm_s390_vcpu_has_irq(vcpu, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	/* avoid races with the injection/SIGP STOP code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	spin_lock(&li->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	flags = li->irq.stop.flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 	spin_unlock(&li->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 	trace_kvm_s390_stop_request(stop_pending, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	if (!stop_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 		rc = kvm_s390_vcpu_store_status(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 						KVM_S390_STORE_STATUS_NOADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	 * no need to check the return value of vcpu_stop as it can only have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	 * an error for protvirt, but protvirt means user cpu state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 		kvm_s390_vcpu_stop(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) static int handle_validity(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 	int viwhy = vcpu->arch.sie_block->ipb >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 	vcpu->stat.exit_validity++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 	trace_kvm_s390_intercept_validity(vcpu, viwhy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 		  current->pid, vcpu->kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	/* do not warn on invalid runtime instrumentation mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	WARN_ONCE(viwhy != 0x44, "kvm: unhandled validity intercept 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 		  viwhy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) static int handle_instruction(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	vcpu->stat.exit_instruction++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	trace_kvm_s390_intercept_instruction(vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 					     vcpu->arch.sie_block->ipa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 					     vcpu->arch.sie_block->ipb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	switch (vcpu->arch.sie_block->ipa >> 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	case 0x01:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 		return kvm_s390_handle_01(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	case 0x82:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 		return kvm_s390_handle_lpsw(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	case 0x83:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 		return kvm_s390_handle_diag(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	case 0xaa:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 		return kvm_s390_handle_aa(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	case 0xae:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 		return kvm_s390_handle_sigp(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	case 0xb2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 		return kvm_s390_handle_b2(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	case 0xb6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 		return kvm_s390_handle_stctl(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	case 0xb7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 		return kvm_s390_handle_lctl(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	case 0xb9:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 		return kvm_s390_handle_b9(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	case 0xe3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 		return kvm_s390_handle_e3(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	case 0xe5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 		return kvm_s390_handle_e5(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	case 0xeb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 		return kvm_s390_handle_eb(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	struct kvm_s390_pgm_info pgm_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 		.code = vcpu->arch.sie_block->iprcc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 		/* the PSW has already been rewound */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 		.flags = KVM_S390_PGM_FLAGS_NO_REWIND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	case PGM_AFX_TRANSLATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	case PGM_ASX_TRANSLATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	case PGM_EX_TRANSLATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	case PGM_LFX_TRANSLATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	case PGM_LSTE_SEQUENCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	case PGM_LSX_TRANSLATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	case PGM_LX_TRANSLATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	case PGM_PRIMARY_AUTHORITY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	case PGM_SECONDARY_AUTHORITY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	case PGM_SPACE_SWITCH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	case PGM_ALEN_TRANSLATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	case PGM_ALE_SEQUENCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	case PGM_ASTE_INSTANCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	case PGM_ASTE_SEQUENCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	case PGM_ASTE_VALIDITY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	case PGM_EXTENDED_AUTHORITY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	case PGM_ASCE_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	case PGM_PAGE_TRANSLATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	case PGM_REGION_FIRST_TRANS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	case PGM_REGION_SECOND_TRANS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	case PGM_REGION_THIRD_TRANS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	case PGM_SEGMENT_TRANSLATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 		pgm_info.exc_access_id  = vcpu->arch.sie_block->eai;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 		pgm_info.op_access_id  = vcpu->arch.sie_block->oai;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	case PGM_MONITOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 		pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 		pgm_info.mon_code = vcpu->arch.sie_block->tecmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 	case PGM_VECTOR_PROCESSING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	case PGM_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 		pgm_info.data_exc_code = vcpu->arch.sie_block->dxc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 	case PGM_PROTECTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 		pgm_info.exc_access_id  = vcpu->arch.sie_block->eai;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 	if (vcpu->arch.sie_block->iprcc & PGM_PER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 		pgm_info.per_code = vcpu->arch.sie_block->perc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 		pgm_info.per_atmid = vcpu->arch.sie_block->peratmid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 		pgm_info.per_address = vcpu->arch.sie_block->peraddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 		pgm_info.per_access_id = vcpu->arch.sie_block->peraid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)  * restore ITDB to program-interruption TDB in guest lowcore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)  * and set TX abort indication if required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) static int handle_itdb(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	struct kvm_s390_itdb *itdb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	if (current->thread.per_flags & PER_FLAG_NO_TE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	memset(itdb, 0, sizeof(*itdb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) #define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) static int handle_prog(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	psw_t psw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	vcpu->stat.exit_program_interruption++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	 * Intercept 8 indicates a loop of specification exceptions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	 * for protected guests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	if (kvm_s390_pv_cpu_is_protected(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 	if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 		rc = kvm_s390_handle_per_event(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 		/* the interrupt might have been filtered out completely */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 		if (vcpu->arch.sie_block->iprcc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 	trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 	if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 		rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 		/* Avoid endless loops of specification exceptions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 		if (!is_valid_psw(&psw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 			return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 	rc = handle_itdb(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 	return inject_prog_on_prog_intercept(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)  * handle_external_interrupt - used for external interruption interceptions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)  * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)  * the new PSW does not have external interrupts disabled. In the first case,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)  * we've got to deliver the interrupt manually, and in the second case, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)  * drop to userspace to handle the situation there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) static int handle_external_interrupt(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 	u16 eic = vcpu->arch.sie_block->eic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	struct kvm_s390_irq irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 	psw_t newpsw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 	vcpu->stat.exit_external_interrupt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 	/* We can not handle clock comparator or timer interrupt with bad PSW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 	if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 	    (newpsw.mask & PSW_MASK_EXT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 	switch (eic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 	case EXT_IRQ_CLK_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 		irq.type = KVM_S390_INT_CLOCK_COMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 	case EXT_IRQ_CPU_TIMER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 		irq.type = KVM_S390_INT_CPU_TIMER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 	case EXT_IRQ_EXTERNAL_CALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 		irq.type = KVM_S390_INT_EXTERNAL_CALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 		irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 		rc = kvm_s390_inject_vcpu(vcpu, &irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 		/* ignore if another external call is already pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 		if (rc == -EBUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 	return kvm_s390_inject_vcpu(vcpu, &irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)  * Handle MOVE PAGE partial execution interception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)  * This interception can only happen for guests with DAT disabled and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)  * addresses that are currently not mapped in the host. Thus we try to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)  * set up the mappings for the corresponding user pages here (or throw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)  * addressing exceptions in case of illegal guest addresses).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 	unsigned long srcaddr, dstaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	int reg1, reg2, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 	/* Make sure that the source is paged-in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 				     reg2, &srcaddr, GACC_FETCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 		return kvm_s390_inject_prog_cond(vcpu, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 	if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	/* Make sure that the destination is paged-in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 				     reg1, &dstaddr, GACC_STORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 		return kvm_s390_inject_prog_cond(vcpu, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 	if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	kvm_s390_retry_instr(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) static int handle_partial_execution(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 	vcpu->stat.exit_pei++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 	if (vcpu->arch.sie_block->ipa == 0xb254)	/* MVPG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 		return handle_mvpg_pei(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 	if (vcpu->arch.sie_block->ipa >> 8 == 0xae)	/* SIGP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 		return kvm_s390_handle_sigp_pei(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 	return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 
/*
 * Handle the sthyi instruction that provides the guest with system
 * information, like current CPU resources available at each level of
 * the machine.
 */
int handle_sthyi(struct kvm_vcpu *vcpu)
{
	int reg1, reg2, r = 0;
	u64 code, addr, cc = 0, rc = 0;
	/* stays NULL on the early "goto out" path below */
	struct sthyi_sctns *sctns = NULL;

	/* STHYI requires facility 74, otherwise it is an illegal operation */
	if (!test_kvm_facility(vcpu->kvm, 74))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
	code = vcpu->run->s.regs.gprs[reg1];	/* function code */
	addr = vcpu->run->s.regs.gprs[reg2];	/* response buffer address */

	vcpu->stat.instruction_sthyi++;
	VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
	trace_kvm_s390_handle_sthyi(vcpu, code, addr);

	/* operands must be a distinct even/odd register pair */
	if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* unsupported function code: report cc 3 / rc 4 to the guest */
	if (code & 0xffff) {
		cc = 3;
		rc = 4;
		goto out;
	}

	/* the response buffer must be page aligned (not checked for protvirt) */
	if (!kvm_s390_pv_cpu_is_protected(vcpu) && (addr & ~PAGE_MASK))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	sctns = (void *)get_zeroed_page(GFP_KERNEL);
	if (!sctns)
		return -ENOMEM;

	cc = sthyi_fill(sctns, &rc);

out:
	if (!cc) {
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			/* protected guest: copy the response via the SIDA */
			memcpy((void *)(sida_origin(vcpu->arch.sie_block)),
			       sctns, PAGE_SIZE);
		} else {
			r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
			if (r) {
				free_page((unsigned long)sctns);
				return kvm_s390_inject_prog_cond(vcpu, r);
			}
		}
	}

	/* sctns may still be NULL here (cc == 3 path); free_page copes */
	free_page((unsigned long)sctns);
	vcpu->run->s.regs.gprs[reg2 + 1] = rc;	/* return code for the guest */
	kvm_s390_set_psw_cc(vcpu, cc);
	return r;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 
/*
 * Handle an operation-exception intercept: emulate STHYI, hand
 * instruction 0 to userspace if requested, and otherwise inject an
 * operation exception into the guest (with loop detection).
 */
static int handle_operexc(struct kvm_vcpu *vcpu)
{
	psw_t oldpsw, newpsw;
	int rc;

	vcpu->stat.exit_operation_exception++;
	trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
				      vcpu->arch.sie_block->ipb);

	/* 0xb256 is the STHYI opcode, which we emulate in the kernel */
	if (vcpu->arch.sie_block->ipa == 0xb256)
		return handle_sthyi(vcpu);

	/* userspace asked to handle instruction 0x0000 itself */
	if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
		return -EOPNOTSUPP;
	rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
	if (rc)
		return rc;
	/*
	 * Avoid endless loops of operation exceptions, if the pgm new
	 * PSW will cause a new operation exception.
	 * The heuristic checks if the pgm new psw is within 6 bytes before
	 * the faulting psw address (with same DAT, AS settings) and the
	 * new psw is not a wait psw and the fault was not triggered by
	 * problem state.
	 *
	 * NOTE(review): the "<= 6" test relies on unsigned wraparound, so
	 * a new psw located after the old one yields a huge value and the
	 * condition is false — presumably intentional, confirm if touched.
	 */
	oldpsw = vcpu->arch.sie_block->gpsw;
	if (oldpsw.addr - newpsw.addr <= 6 &&
	    !(newpsw.mask & PSW_MASK_WAIT) &&
	    !(oldpsw.mask & PSW_MASK_PSTATE) &&
	    (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
	    (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT))
		return -EOPNOTSUPP;

	return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) static int handle_pv_spx(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 	u32 pref = *(u32 *)vcpu->arch.sie_block->sidad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 	kvm_s390_set_prefix(vcpu, pref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 	trace_kvm_s390_handle_prefix(vcpu, 1, pref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) static int handle_pv_sclp(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 	spin_lock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 	 * 2 cases:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 	 * a: an sccb answering interrupt was already pending or in flight.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 	 *    As the sccb value is not known we can simply set some value to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 	 *    trigger delivery of a saved SCCB. UV will then use its saved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	 *    copy of the SCCB value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 	 * b: an error SCCB interrupt needs to be injected so we also inject
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 	 *    a fake SCCB address. Firmware will use the proper one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	 * This makes sure, that both errors and real sccb returns will only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 	 * be delivered after a notification intercept (instruction has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 	 * finished) but not after others.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 	fi->srv_signal.ext_params |= 0x43000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	spin_unlock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) static int handle_pv_uvc(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 	struct uv_cb_share *guest_uvcb = (void *)vcpu->arch.sie_block->sidad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 	struct uv_cb_cts uvcb = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 		.header.cmd	= UVC_CMD_UNPIN_PAGE_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 		.header.len	= sizeof(uvcb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 		.guest_handle	= kvm_s390_pv_get_handle(vcpu->kvm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 		.gaddr		= guest_uvcb->paddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 	if (guest_uvcb->header.cmd != UVC_CMD_REMOVE_SHARED_ACCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 		WARN_ONCE(1, "Unexpected notification intercept for UVC 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 			  guest_uvcb->header.cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	rc = gmap_make_secure(vcpu->arch.gmap, uvcb.gaddr, &uvcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	 * If the unpin did not succeed, the guest will exit again for the UVC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 	 * and we will retry the unpin.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 	if (rc == -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) static int handle_pv_notification(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	if (vcpu->arch.sie_block->ipa == 0xb210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 		return handle_pv_spx(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	if (vcpu->arch.sie_block->ipa == 0xb220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 		return handle_pv_sclp(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 	if (vcpu->arch.sie_block->ipa == 0xb9a4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 		return handle_pv_uvc(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 	return handle_instruction(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	int rc, per_rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 	if (kvm_is_ucontrol(vcpu->kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 	switch (vcpu->arch.sie_block->icptcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 	case ICPT_EXTREQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 		vcpu->stat.exit_external_request++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 	case ICPT_IOREQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 		vcpu->stat.exit_io_request++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 	case ICPT_INST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 		rc = handle_instruction(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 	case ICPT_PROGI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 		return handle_prog(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	case ICPT_EXTINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 		return handle_external_interrupt(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	case ICPT_WAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 		return kvm_s390_handle_wait(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	case ICPT_VALIDITY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 		return handle_validity(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	case ICPT_STOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 		return handle_stop(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	case ICPT_OPEREXC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 		rc = handle_operexc(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 	case ICPT_PARTEXEC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 		rc = handle_partial_execution(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 	case ICPT_KSS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 		rc = kvm_s390_skey_check_enable(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 	case ICPT_MCHKREQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 	case ICPT_INT_ENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 		 * PSW bit 13 or a CR (0, 6, 14) changed and we might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 		 * now be able to deliver interrupts. The pre-run code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 		 * will take care of this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 	case ICPT_PV_INSTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 		rc = handle_instruction(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 	case ICPT_PV_NOTIFY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 		rc = handle_pv_notification(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 	case ICPT_PV_PREF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 		gmap_convert_to_secure(vcpu->arch.gmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 				       kvm_s390_get_prefix(vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 		gmap_convert_to_secure(vcpu->arch.gmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 				       kvm_s390_get_prefix(vcpu) + PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 	/* process PER, also if the instrution is processed in user space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 	if (vcpu->arch.sie_block->icptstatus & 0x02 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 	    (!rc || rc == -EOPNOTSUPP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 		per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 	return per_rc ? per_rc : rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) }