Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * access guest memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright IBM Corp. 2008, 2014
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *    Author(s): Carsten Otte <cotte@de.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #ifndef __KVM_S390_GACCESS_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #define __KVM_S390_GACCESS_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/compiler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/kvm_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include "kvm-s390.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20)  * kvm_s390_real_to_abs - convert guest real address to guest absolute address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21)  * @prefix - guest prefix
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  * @gra - guest real address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  * Returns the guest absolute address that corresponds to the passed guest real
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25)  * address @gra of by applying the given prefix.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 	if (gra < 2 * PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 		gra += prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 	else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 		gra -= prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 	return gra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 
/**
 * kvm_s390_real_to_abs - convert guest real address to guest absolute address
 * @vcpu: guest virtual cpu
 * @gra: guest real address
 *
 * Returns the guest absolute address that corresponds to the passed guest real
 * address @gra of a virtual guest cpu by applying its prefix.
 */
static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
						 unsigned long gra)
{
	return _kvm_s390_real_to_abs(kvm_s390_get_prefix(vcpu), gra);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51)  * _kvm_s390_logical_to_effective - convert guest logical to effective address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52)  * @psw: psw of the guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53)  * @ga: guest logical address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55)  * Convert a guest logical address to an effective address by applying the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56)  * rules of the addressing mode defined by bits 31 and 32 of the given PSW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57)  * (extendended/basic addressing mode).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59)  * Depending on the addressing mode, the upper 40 bits (24 bit addressing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60)  * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61)  * mode) of @ga will be zeroed and the remaining bits will be returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) static inline unsigned long _kvm_s390_logical_to_effective(psw_t *psw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 							   unsigned long ga)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 		return ga;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 		return ga & ((1UL << 31) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	return ga & ((1UL << 24) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74)  * kvm_s390_logical_to_effective - convert guest logical to effective address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75)  * @vcpu: guest virtual cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76)  * @ga: guest logical address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78)  * Convert a guest vcpu logical address to a guest vcpu effective address by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79)  * applying the rules of the vcpu's addressing mode defined by PSW bits 31
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80)  * and 32 (extendended/basic addressing mode).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82)  * Depending on the vcpu's addressing mode the upper 40 bits (24 bit addressing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83)  * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84)  * of @ga will be zeroed and the remaining bits will be returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 							  unsigned long ga)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93)  * put_guest_lc, read_guest_lc and write_guest_lc are guest access functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94)  * which shall only be used to access the lowcore of a vcpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95)  * These functions should be used for e.g. interrupt handlers where no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96)  * guest memory access protection facilities, like key or low address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97)  * protection, are applicable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98)  * At a later point guest vcpu lowcore access should happen via pinned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99)  * prefix pages, so that these pages can be accessed directly via the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)  * kernel mapping. All of these *_lc functions can be removed then.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 
/**
 * put_guest_lc - write a simple variable to a guest vcpu's lowcore
 * @vcpu: virtual cpu
 * @x: value to copy to guest
 * @gra: vcpu's destination guest real address
 *
 * Copies a simple value from kernel space to a guest vcpu's lowcore.
 * The size of the variable may be 1, 2, 4 or 8 bytes. The destination
 * must be located in the vcpu's lowcore. Otherwise the result is undefined.
 *
 * Note that @gra is used twice: its value becomes the guest real address
 * (the vcpu's prefix is added to form the absolute address) and its pointee
 * type determines the size of the store, via __typeof__(*(gra)).
 * NOTE(review): callers presumably pass a cast pointer such as (u16 *) to
 * select the access width — confirm against call sites.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 *	 the guest memory mapping is broken. In any case the best solution
 *	 would be to terminate the guest.
 *	 It is wrong to inject a guest exception.
 */
#define put_guest_lc(vcpu, x, gra)				\
({								\
	struct kvm_vcpu *__vcpu = (vcpu);			\
	__typeof__(*(gra)) __x = (x);				\
	unsigned long __gpa;					\
								\
	__gpa = (unsigned long)(gra);				\
	__gpa += kvm_s390_get_prefix(__vcpu);			\
	kvm_write_guest(__vcpu->kvm, __gpa, &__x, sizeof(__x));	\
})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)  * write_guest_lc - copy data from kernel space to guest vcpu's lowcore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)  * @vcpu: virtual cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)  * @gra: vcpu's source guest real address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)  * @data: source address in kernel space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)  * @len: number of bytes to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)  * Copy data from kernel space to guest vcpu's lowcore. The entire range must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)  * be located within the vcpu's lowcore, otherwise the result is undefined.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)  * Returns zero on success or -EFAULT on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)  * Note: an error indicates that either the kernel is out of memory or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)  *	 the guest memory mapping is broken. In any case the best solution
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)  *	 would be to terminate the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)  *	 It is wrong to inject a guest exception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) static inline __must_check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 		   unsigned long len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	return kvm_write_guest(vcpu->kvm, gpa, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)  * read_guest_lc - copy data from guest vcpu's lowcore to kernel space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)  * @vcpu: virtual cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)  * @gra: vcpu's source guest real address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)  * @data: destination address in kernel space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)  * @len: number of bytes to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)  * Copy data from guest vcpu's lowcore to kernel space. The entire range must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)  * be located within the vcpu's lowcore, otherwise the result is undefined.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)  * Returns zero on success or -EFAULT on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)  * Note: an error indicates that either the kernel is out of memory or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)  *	 the guest memory mapping is broken. In any case the best solution
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)  *	 would be to terminate the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)  *	 It is wrong to inject a guest exception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) static inline __must_check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 		  unsigned long len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	return kvm_read_guest(vcpu->kvm, gpa, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 
/* Type of guest memory access, passed to access_guest*(). */
enum gacc_mode {
	GACC_FETCH,	/* data read, used by read_guest() */
	GACC_STORE,	/* data write, used by write_guest() */
	GACC_IFETCH,	/* instruction fetch, used by read_guest_instr() */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 
/* Translate guest virtual address @gva to a guest physical address in *@gpa
 * for the given access mode (implemented in gaccess.c). */
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
			    u8 ar, unsigned long *gpa, enum gacc_mode mode);
/* Check that the range [@gva, @gva + @length) is accessible for @mode. */
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
		    unsigned long length, enum gacc_mode mode);

/* Core guest access routine backing read_guest()/write_guest()/
 * read_guest_instr(); see the kernel-doc on those wrappers. */
int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
		 unsigned long len, enum gacc_mode mode);

/* Guest access via real addresses, backing read/write_guest_real(). */
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, enum gacc_mode mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)  * write_guest - copy data from kernel space to guest space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)  * @vcpu: virtual cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)  * @ga: guest address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)  * @ar: access register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)  * @data: source address in kernel space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)  * @len: number of bytes to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)  * Copy @len bytes from @data (kernel space) to @ga (guest address).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)  * In order to copy data to guest space the PSW of the vcpu is inspected:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)  * If DAT is off data will be copied to guest real or absolute memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)  * If DAT is on data will be copied to the address space as specified by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)  * the address space bits of the PSW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)  * Primary, secondary, home space or access register mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)  * The addressing mode of the PSW is also inspected, so that address wrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)  * around is taken into account for 24-, 31- and 64-bit addressing mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)  * if the to be copied data crosses page boundaries in guest address space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)  * In addition also low address and DAT protection are inspected before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)  * copying any data (key protection is currently not implemented).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)  * This function modifies the 'struct kvm_s390_pgm_info pgm' member of @vcpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)  * In case of an access exception (e.g. protection exception) pgm will contain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)  * all data necessary so that a subsequent call to 'kvm_s390_inject_prog_vcpu()'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)  * will inject a correct exception into the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)  * If no access exception happened, the contents of pgm are undefined when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)  * this function returns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)  * Returns:  - zero on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)  *	     - a negative value if e.g. the guest mapping is broken or in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)  *	       case of out-of-memory. In this case the contents of pgm are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)  *	       undefined. Also parts of @data may have been copied to guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)  *	       space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)  *	     - a positive value if an access exception happened. In this case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)  *	       the returned value is the program interruption code and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)  *	       contents of pgm may be used to inject an exception into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235)  *	       guest. No data has been copied to guest space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)  * Note: in case an access exception is recognized no data has been copied to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)  *	 guest space (this is also true, if the to be copied data would cross
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)  *	 one or more page boundaries in guest space).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)  *	 Therefore this function may be used for nullifying and suppressing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)  *	 instruction emulation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)  *	 It may also be used for terminating instructions, if it is undefined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)  *	 if data has been changed in guest space in case of an exception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) static inline __must_check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 		unsigned long len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 	return access_guest(vcpu, ga, ar, data, len, GACC_STORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)  * read_guest - copy data from guest space to kernel space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)  * @vcpu: virtual cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)  * @ga: guest address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)  * @ar: access register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)  * @data: destination address in kernel space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)  * @len: number of bytes to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)  * Copy @len bytes from @ga (guest address) to @data (kernel space).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)  * The behaviour of read_guest is identical to write_guest, except that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)  * data will be copied from guest space to kernel space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) static inline __must_check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 	       unsigned long len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 	return access_guest(vcpu, ga, ar, data, len, GACC_FETCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)  * read_guest_instr - copy instruction data from guest space to kernel space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)  * @vcpu: virtual cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)  * @ga: guest address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)  * @data: destination address in kernel space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)  * @len: number of bytes to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)  * Copy @len bytes from the given address (guest space) to @data (kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)  * space).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)  * The behaviour of read_guest_instr is identical to read_guest, except that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)  * instruction data will be read from primary space when in home-space or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)  * address-space mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) static inline __must_check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) int read_guest_instr(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 		     unsigned long len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 	return access_guest(vcpu, ga, 0, data, len, GACC_IFETCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)  * write_guest_abs - copy data from kernel space to guest space absolute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)  * @vcpu: virtual cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296)  * @gpa: guest physical (absolute) address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297)  * @data: source address in kernel space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)  * @len: number of bytes to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)  * Copy @len bytes from @data (kernel space) to @gpa (guest absolute address).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)  * It is up to the caller to ensure that the entire guest memory range is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)  * valid memory before calling this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)  * Guest low address and key protection are not checked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)  * Returns zero on success or -EFAULT on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)  * If an error occurs data may have been copied partially to guest memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) static inline __must_check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 		    unsigned long len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 	return kvm_write_guest(vcpu->kvm, gpa, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)  * read_guest_abs - copy data from guest space absolute to kernel space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)  * @vcpu: virtual cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)  * @gpa: guest physical (absolute) address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)  * @data: destination address in kernel space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)  * @len: number of bytes to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)  * Copy @len bytes from @gpa (guest absolute address) to @data (kernel space).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)  * It is up to the caller to ensure that the entire guest memory range is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)  * valid memory before calling this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)  * Guest key protection is not checked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)  * Returns zero on success or -EFAULT on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)  * If an error occurs data may have been copied partially to kernel space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) static inline __must_check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 		   unsigned long len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 	return kvm_read_guest(vcpu->kvm, gpa, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)  * write_guest_real - copy data from kernel space to guest space real
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)  * @vcpu: virtual cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)  * @gra: guest real address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)  * @data: source address in kernel space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)  * @len: number of bytes to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)  * Copy @len bytes from @data (kernel space) to @gra (guest real address).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)  * It is up to the caller to ensure that the entire guest memory range is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)  * valid memory before calling this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)  * Guest low address and key protection are not checked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)  * Returns zero on success or -EFAULT on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)  * If an error occurs data may have been copied partially to guest memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) static inline __must_check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 		     unsigned long len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 	return access_guest_real(vcpu, gra, data, len, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)  * read_guest_real - copy data from guest space real to kernel space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)  * @vcpu: virtual cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)  * @gra: guest real address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)  * @data: destination address in kernel space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)  * @len: number of bytes to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)  * Copy @len bytes from @gra (guest real address) to @data (kernel space).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)  * It is up to the caller to ensure that the entire guest memory range is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)  * valid memory before calling this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)  * Guest key protection is not checked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374)  * Returns zero on success or -EFAULT on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)  * If an error occurs data may have been copied partially to kernel space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) static inline __must_check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 		    unsigned long len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 	return access_guest_real(vcpu, gra, data, len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 
/* IPTE lock primitives (implemented in gaccess.c). NOTE(review): presumably
 * serialize guest page-table invalidation against guest accesses — confirm
 * against the implementation. */
void ipte_lock(struct kvm_vcpu *vcpu);
void ipte_unlock(struct kvm_vcpu *vcpu);
int ipte_lock_held(struct kvm_vcpu *vcpu);
/* Check low-address protection for an access to guest real address @gra. */
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);

/* MVPG PEI indication bits */
/* NOTE(review): bit semantics per the MVPG page-exception-indication
 * definition in the Principles of Operation — confirm before relying. */
#define PEI_DAT_PROT 2
#define PEI_NOT_PTE 4

int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
			  unsigned long saddr, unsigned long *datptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) #endif /* __KVM_S390_GACCESS_H */