Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards
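
The source file shown below is the s390 KVM protected-virtualization support code (arch/s390/kvm/pv.c in the kernel tree): the host-side helpers that call the Ultravisor to create and tear down secure guest configurations and CPUs, hand over the security parameters of the guest image, unpack the encrypted image into guest memory, and report vCPU state changes.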

// SPDX-License-Identifier: GPL-2.0
/*
 * Hosting Protected Virtual Machines
 *
 * Copyright IBM Corp. 2019, 2020
 *    Author(s): Janosch Frank <frankja@linux.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <asm/gmap.h>
#include <asm/uv.h>
#include <asm/mman.h>
#include "kvm-s390.h"

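/*
 * kvm_s390_pv_destroy_cpu() - remove the Ultravisor view of a protected vCPU.
 *
 * If the vCPU has no secure CPU handle this is a no-op. Otherwise the
 * Destroy Secure CPU UV call is issued; the donated CPU base storage is
 * freed only when that call succeeds (it is intentionally leaked on
 * failure, see below), while the SIDA page is freed and the PV handles in
 * the SIE control block are cleared either way. Returns 0 on success,
 * -EIO if the UV call failed; @rc and @rrc carry the UV return and
 * reason codes.
 */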
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
        int cc;

        if (!kvm_s390_pv_cpu_get_handle(vcpu))
                return 0;

        cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), UVC_CMD_DESTROY_SEC_CPU, rc, rrc);

        KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
                     vcpu->vcpu_id, *rc, *rrc);
        WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x", *rc, *rrc);

        /* Intended memory leak for something that should never happen. */
        if (!cc)
                free_pages(vcpu->arch.pv.stor_base,
                           get_order(uv_info.guest_cpu_stor_len));

        free_page(sida_origin(vcpu->arch.sie_block));
        vcpu->arch.sie_block->pv_handle_cpu = 0;
        vcpu->arch.sie_block->pv_handle_config = 0;
        memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
        vcpu->arch.sie_block->sdf = 0;
        /*
         * The sidad field (for sdf == 2) is now the gbea field (for sdf == 0).
         * Use the reset value of gbea to avoid leaking the kernel pointer of
         * the just freed sida.
         */
        vcpu->arch.sie_block->gbea = 1;
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

        return cc ? -EIO : 0;
}

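/*
 * kvm_s390_pv_create_cpu() - create the Ultravisor counterpart of a vCPU.
 *
 * Donates base storage for the secure CPU plus one zeroed page for the
 * Secure Instruction Data Area (SIDA) and issues the Create Secure CPU
 * UV call. On success the returned CPU handle is stored in vcpu->arch.pv
 * and in the SIE control block and sdf is set to 2; on failure
 * kvm_s390_pv_destroy_cpu() is called for cleanup and -EIO is returned.
 * Returns -EINVAL if the vCPU already has a handle and -ENOMEM on
 * allocation failure; @rc and @rrc carry the UV return and reason codes.
 */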
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
        struct uv_cb_csc uvcb = {
                .header.cmd = UVC_CMD_CREATE_SEC_CPU,
                .header.len = sizeof(uvcb),
        };
        int cc;

        if (kvm_s390_pv_cpu_get_handle(vcpu))
                return -EINVAL;

        vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL,
                                                   get_order(uv_info.guest_cpu_stor_len));
        if (!vcpu->arch.pv.stor_base)
                return -ENOMEM;

        /* Input */
        uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
        uvcb.num = vcpu->arch.sie_block->icpua;
        uvcb.state_origin = (u64)vcpu->arch.sie_block;
        uvcb.stor_origin = (u64)vcpu->arch.pv.stor_base;

        /* Alloc Secure Instruction Data Area Designation */
        vcpu->arch.sie_block->sidad = __get_free_page(GFP_KERNEL | __GFP_ZERO);
        if (!vcpu->arch.sie_block->sidad) {
                free_pages(vcpu->arch.pv.stor_base,
                           get_order(uv_info.guest_cpu_stor_len));
                return -ENOMEM;
        }

        cc = uv_call(0, (u64)&uvcb);
        *rc = uvcb.header.rc;
        *rrc = uvcb.header.rrc;
        KVM_UV_EVENT(vcpu->kvm, 3,
                     "PROTVIRT CREATE VCPU: cpu %d handle %llx rc %x rrc %x",
                     vcpu->vcpu_id, uvcb.cpu_handle, uvcb.header.rc,
                     uvcb.header.rrc);

        if (cc) {
                u16 dummy;

                kvm_s390_pv_destroy_cpu(vcpu, &dummy, &dummy);
                return -EIO;
        }

        /* Output */
        vcpu->arch.pv.handle = uvcb.cpu_handle;
        vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
        vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
        vcpu->arch.sie_block->sdf = 2;
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        return 0;
}

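/*
 * kvm_s390_pv_dealloc_vm() - give the base and variable storage that was
 * set aside for the Ultravisor back to the kernel and wipe the PV
 * bookkeeping data. Must only be called while the Ultravisor does not
 * (or no longer) own that memory, see the note below.
 */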
/* only free resources when the destroy was successful */
static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
{
        vfree(kvm->arch.pv.stor_var);
        free_pages(kvm->arch.pv.stor_base,
                   get_order(uv_info.guest_base_stor_len));
        memset(&kvm->arch.pv, 0, sizeof(kvm->arch.pv));
}

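/*
 * kvm_s390_pv_alloc_vm() - allocate the memory that will be donated to the
 * Ultravisor when the secure configuration is created: base storage sized
 * by uv_info.guest_base_stor_len and variable storage sized from the
 * current guest memory size, which is taken from the memslot covering the
 * highest guest frame number. Returns 0 on success, -ENOMEM otherwise.
 */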
static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
{
        unsigned long base = uv_info.guest_base_stor_len;
        unsigned long virt = uv_info.guest_virt_var_stor_len;
        unsigned long npages = 0, vlen = 0;
        struct kvm_memory_slot *memslot;

        kvm->arch.pv.stor_var = NULL;
        kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL, get_order(base));
        if (!kvm->arch.pv.stor_base)
                return -ENOMEM;

        /*
         * Calculate current guest storage for allocation of the
         * variable storage, which is based on the length in MB.
         *
         * Slots are sorted by GFN
         */
        mutex_lock(&kvm->slots_lock);
        memslot = kvm_memslots(kvm)->memslots;
        npages = memslot->base_gfn + memslot->npages;
        mutex_unlock(&kvm->slots_lock);

        kvm->arch.pv.guest_len = npages * PAGE_SIZE;

        /* Allocate variable storage */
        vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
        vlen += uv_info.guest_virt_base_stor_len;
        kvm->arch.pv.stor_var = vzalloc(vlen);
        if (!kvm->arch.pv.stor_var)
                goto out_err;
        return 0;

out_err:
        kvm_s390_pv_dealloc_vm(kvm);
        return -ENOMEM;
}

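/*
 * kvm_s390_pv_deinit_vm() - destroy the secure configuration of a guest.
 *
 * All guest pages are made accessible again, then the Destroy Secure
 * Configuration UV call is issued, the gmap guest handle is cleared and
 * the mm is marked as no longer protected. The donated memory is freed
 * only if the UV call succeeded. Returns 0 on success, -EIO otherwise;
 * @rc and @rrc carry the UV return and reason codes.
 */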
/* this should not fail, but if it does, we must not free the donated memory */
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
        int cc;

        /* make all pages accessible before destroying the guest */
        s390_reset_acc(kvm->mm);

        cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
                           UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
        WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
        atomic_set(&kvm->mm->context.is_protected, 0);
        KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
        WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);
        /* Intended memory leak on "impossible" error */
        if (!cc)
                kvm_s390_pv_dealloc_vm(kvm);
        return cc ? -EIO : 0;
}

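/*
 * kvm_s390_pv_init_vm() - create the secure configuration for a guest.
 *
 * Allocates the memory to be donated to the Ultravisor, fills the Create
 * Secure Configuration control block (guest storage size, ASCE, SCA and
 * the donated base/variable storage) and issues the UV call. On success
 * the configuration handle is stored in kvm->arch.pv and in the gmap; on
 * failure the donated memory is either torn down via
 * kvm_s390_pv_deinit_vm() (if the Ultravisor flags that a destroy is
 * needed) or freed directly, and -EIO is returned.
 */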
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
        struct uv_cb_cgc uvcb = {
                .header.cmd = UVC_CMD_CREATE_SEC_CONF,
                .header.len = sizeof(uvcb)
        };
        int cc, ret;
        u16 dummy;

        ret = kvm_s390_pv_alloc_vm(kvm);
        if (ret)
                return ret;

        /* Inputs */
        uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */
        uvcb.guest_stor_len = kvm->arch.pv.guest_len;
        uvcb.guest_asce = kvm->arch.gmap->asce;
        uvcb.guest_sca = (unsigned long)kvm->arch.sca;
        uvcb.conf_base_stor_origin = (u64)kvm->arch.pv.stor_base;
        uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;

        cc = uv_call_sched(0, (u64)&uvcb);
        *rc = uvcb.header.rc;
        *rrc = uvcb.header.rrc;
        KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x",
                     uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc);

        /* Outputs */
        kvm->arch.pv.handle = uvcb.guest_handle;

        if (cc) {
                if (uvcb.header.rc & UVC_RC_NEED_DESTROY)
                        kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
                else
                        kvm_s390_pv_dealloc_vm(kvm);
                return -EIO;
        }
        kvm->arch.gmap->guest_handle = uvcb.guest_handle;
        return 0;
}

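/*
 * kvm_s390_pv_set_sec_parms() - pass the security header of the guest
 * image to the Ultravisor.
 *
 * Issues the Set Secure Configuration Parameters UV call for the header
 * at @hdr with length @length. On success the mm is marked as protected.
 * Returns 0 on success, -EINVAL if the UV call failed; @rc and @rrc
 * carry the UV return and reason codes.
 */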
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
                              u16 *rrc)
{
        struct uv_cb_ssc uvcb = {
                .header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
                .header.len = sizeof(uvcb),
                .sec_header_origin = (u64)hdr,
                .sec_header_len = length,
                .guest_handle = kvm_s390_pv_get_handle(kvm),
        };
        int cc = uv_call(0, (u64)&uvcb);

        *rc = uvcb.header.rc;
        *rrc = uvcb.header.rrc;
        KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
                     *rc, *rrc);
        if (!cc)
                atomic_set(&kvm->mm->context.is_protected, 1);
        return cc ? -EINVAL : 0;
}

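/*
 * unpack_one() - import one page of the encrypted guest image.
 *
 * Builds an Unpack UV control block for the guest page at @addr, with the
 * page @offset folded into the @tweak, and lets gmap_make_secure() turn
 * the page into a secure guest page. Failures other than -EAGAIN are
 * logged together with the UV return and reason codes.
 */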
static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
                      u64 offset, u16 *rc, u16 *rrc)
{
        struct uv_cb_unp uvcb = {
                .header.cmd = UVC_CMD_UNPACK_IMG,
                .header.len = sizeof(uvcb),
                .guest_handle = kvm_s390_pv_get_handle(kvm),
                .gaddr = addr,
                .tweak[0] = tweak,
                .tweak[1] = offset,
        };
        int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);

        *rc = uvcb.header.rc;
        *rrc = uvcb.header.rrc;

        if (ret && ret != -EAGAIN)
                KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
                             uvcb.gaddr, *rc, *rrc);
        return ret;
}

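/*
 * kvm_s390_pv_unpack() - unpack an encrypted guest image range page by page.
 *
 * @addr and @size must be page aligned. Each page is handed to
 * unpack_one(); -EAGAIN is retried after cond_resched() unless a fatal
 * signal is pending. Returns 0 once the whole range has been unpacked,
 * otherwise the error of the last attempt.
 */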
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
                       unsigned long tweak, u16 *rc, u16 *rrc)
{
        u64 offset = 0;
        int ret = 0;

        if (addr & ~PAGE_MASK || !size || size & ~PAGE_MASK)
                return -EINVAL;

        KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
                     addr, size);

        while (offset < size) {
                ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
                if (ret == -EAGAIN) {
                        cond_resched();
                        if (fatal_signal_pending(current))
                                break;
                        continue;
                }
                if (ret)
                        break;
                addr += PAGE_SIZE;
                offset += PAGE_SIZE;
        }
        if (!ret)
                KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
        return ret;
}

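/*
 * kvm_s390_pv_set_cpu_state() - report a vCPU state change to the
 * Ultravisor.
 *
 * Issues the CPU Set State UV call for the vCPU's secure CPU handle with
 * the requested @state (for example operating or stopped). Returns 0 on
 * success, -EINVAL if the Ultravisor rejected the request.
 */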
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
{
        struct uv_cb_cpu_set_state uvcb = {
                .header.cmd     = UVC_CMD_CPU_SET_STATE,
                .header.len     = sizeof(uvcb),
                .cpu_handle     = kvm_s390_pv_cpu_get_handle(vcpu),
                .state          = state,
        };
        int cc;

        cc = uv_call(0, (u64)&uvcb);
        KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
                     vcpu->vcpu_id, state, uvcb.header.rc, uvcb.header.rrc);
        if (cc)
                return -EINVAL;
        return 0;
}