Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ultravisor Interfaces
 *
 * Copyright IBM Corp. 2019
 *
 * Author(s):
 *	Vasily Gorbik <gor@linux.ibm.com>
 *	Janosch Frank <frankja@linux.ibm.com>
 */
#ifndef _ASM_S390_UV_H
#define _ASM_S390_UV_H

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/gmap.h>

#define UVC_RC_EXECUTED		0x0001
#define UVC_RC_INV_CMD		0x0002
#define UVC_RC_INV_STATE	0x0003
#define UVC_RC_INV_LEN		0x0005
#define UVC_RC_NO_RESUME	0x0007
#define UVC_RC_NEED_DESTROY	0x8000

#define UVC_CMD_QUI			0x0001
#define UVC_CMD_INIT_UV			0x000f
#define UVC_CMD_CREATE_SEC_CONF		0x0100
#define UVC_CMD_DESTROY_SEC_CONF	0x0101
#define UVC_CMD_CREATE_SEC_CPU		0x0120
#define UVC_CMD_DESTROY_SEC_CPU		0x0121
#define UVC_CMD_CONV_TO_SEC_STOR	0x0200
#define UVC_CMD_CONV_FROM_SEC_STOR	0x0201
#define UVC_CMD_DESTR_SEC_STOR		0x0202
#define UVC_CMD_SET_SEC_CONF_PARAMS	0x0300
#define UVC_CMD_UNPACK_IMG		0x0301
#define UVC_CMD_VERIFY_IMG		0x0302
#define UVC_CMD_CPU_RESET		0x0310
#define UVC_CMD_CPU_RESET_INITIAL	0x0311
#define UVC_CMD_PREPARE_RESET		0x0320
#define UVC_CMD_CPU_RESET_CLEAR		0x0321
#define UVC_CMD_CPU_SET_STATE		0x0330
#define UVC_CMD_SET_UNSHARE_ALL		0x0340
#define UVC_CMD_PIN_PAGE_SHARED		0x0341
#define UVC_CMD_UNPIN_PAGE_SHARED	0x0342
#define UVC_CMD_SET_SHARED_ACCESS	0x1000
#define UVC_CMD_REMOVE_SHARED_ACCESS	0x1001

/* Bits in installed uv calls */
enum uv_cmds_inst {
	BIT_UVC_CMD_QUI = 0,
	BIT_UVC_CMD_INIT_UV = 1,
	BIT_UVC_CMD_CREATE_SEC_CONF = 2,
	BIT_UVC_CMD_DESTROY_SEC_CONF = 3,
	BIT_UVC_CMD_CREATE_SEC_CPU = 4,
	BIT_UVC_CMD_DESTROY_SEC_CPU = 5,
	BIT_UVC_CMD_CONV_TO_SEC_STOR = 6,
	BIT_UVC_CMD_CONV_FROM_SEC_STOR = 7,
	BIT_UVC_CMD_SET_SHARED_ACCESS = 8,
	BIT_UVC_CMD_REMOVE_SHARED_ACCESS = 9,
	BIT_UVC_CMD_SET_SEC_PARMS = 11,
	BIT_UVC_CMD_UNPACK_IMG = 13,
	BIT_UVC_CMD_VERIFY_IMG = 14,
	BIT_UVC_CMD_CPU_RESET = 15,
	BIT_UVC_CMD_CPU_RESET_INITIAL = 16,
	BIT_UVC_CMD_CPU_SET_STATE = 17,
	BIT_UVC_CMD_PREPARE_RESET = 18,
	BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET = 19,
	BIT_UVC_CMD_UNSHARE_ALL = 20,
	BIT_UVC_CMD_PIN_PAGE_SHARED = 21,
	BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
};

enum uv_feat_ind {
	BIT_UV_FEAT_MISC = 0,
};

struct uv_cb_header {
	u16 len;
	u16 cmd;	/* Command Code */
	u16 rc;		/* Response Code */
	u16 rrc;	/* Return Reason Code */
} __packed __aligned(8);

/* Query Ultravisor Information */
struct uv_cb_qui {
	struct uv_cb_header header;
	u64 reserved08;
	u64 inst_calls_list[4];
	u64 reserved30[2];
	u64 uv_base_stor_len;
	u64 reserved48;
	u64 conf_base_phys_stor_len;
	u64 conf_base_virt_stor_len;
	u64 conf_virt_var_stor_len;
	u64 cpu_stor_len;
	u32 reserved70[3];
	u32 max_num_sec_conf;
	u64 max_guest_stor_addr;
	u8  reserved88[158 - 136];
	u16 max_guest_cpu_id;
	u64 uv_feature_indications;
	u8  reserveda0[200 - 168];
} __packed __aligned(8);

/* Initialize Ultravisor */
struct uv_cb_init {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 stor_origin;
	u64 stor_len;
	u64 reserved28[4];
} __packed __aligned(8);

/* Create Guest Configuration */
struct uv_cb_cgc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 conf_base_stor_origin;
	u64 conf_virt_stor_origin;
	u64 reserved30;
	u64 guest_stor_origin;
	u64 guest_stor_len;
	u64 guest_sca;
	u64 guest_asce;
	u64 reserved58[5];
} __packed __aligned(8);

/* Create Secure CPU */
struct uv_cb_csc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 cpu_handle;
	u64 guest_handle;
	u64 stor_origin;
	u8  reserved30[6];
	u16 num;
	u64 state_origin;
	u64 reserved40[4];
} __packed __aligned(8);

/* Convert to Secure */
struct uv_cb_cts {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 gaddr;
} __packed __aligned(8);

/* Convert from Secure / Pin Page Shared */
struct uv_cb_cfs {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 paddr;
} __packed __aligned(8);

/* Set Secure Config Parameter */
struct uv_cb_ssc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 sec_header_origin;
	u32 sec_header_len;
	u32 reserved2c;
	u64 reserved30[4];
} __packed __aligned(8);

/* Unpack */
struct uv_cb_unp {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 gaddr;
	u64 tweak[2];
	u64 reserved38[3];
} __packed __aligned(8);

#define PV_CPU_STATE_OPR	1
#define PV_CPU_STATE_STP	2
#define PV_CPU_STATE_CHKSTP	3
#define PV_CPU_STATE_OPR_LOAD	5

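/* Set CPU state (UVC_CMD_CPU_SET_STATE) */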
struct uv_cb_cpu_set_state {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 cpu_handle;
	u8  reserved20[7];
	u8  state;
	u64 reserved28[5];
} __packed __aligned(8);

/*
 * A common UV call struct for calls that take no payload
 * Examples:
 * Destroy cpu/config
 * Verify
 */
struct uv_cb_nodata {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 handle;
	u64 reserved20[4];
} __packed __aligned(8);

/* Set Shared Access */
struct uv_cb_share {
	struct uv_cb_header header;
	u64 reserved08[3];
	u64 paddr;
	u64 reserved28;
} __packed __aligned(8);

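/*
 * Issue the Ultravisor Call instruction (UVC, opcode 0xb9a4) with r1 and r2
 * as its two register operands and return the resulting condition code.
 * In this header r1 is always 0 and r2 holds the address of a UV call
 * control block; a condition code > 1 indicates a busy condition, which
 * the wrappers below handle by retrying.
 */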
static inline int __uv_call(unsigned long r1, unsigned long r2)
{
	int cc;

	asm volatile(
		"	.insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc)
		: [r1] "a" (r1), [r2] "a" (r2)
		: "memory", "cc");
	return cc;
}

static inline int uv_call(unsigned long r1, unsigned long r2)
{
	int cc;

	do {
		cc = __uv_call(r1, r2);
	} while (cc > 1);
	return cc;
}

/* Low level uv_call that avoids stalls for long running busy conditions */
static inline int uv_call_sched(unsigned long r1, unsigned long r2)
{
	int cc;

	do {
		cc = __uv_call(r1, r2);
		cond_resched();
	} while (cc > 1);
	return cc;
}

/*
 * special variant of uv_call that only transports the cpu or guest
 * handle and the command, like destroy or verify.
 */
static inline int uv_cmd_nodata(u64 handle, u16 cmd, u16 *rc, u16 *rrc)
{
	struct uv_cb_nodata uvcb = {
		.header.cmd = cmd,
		.header.len = sizeof(uvcb),
		.handle = handle,
	};
	int cc;

	WARN(!handle, "No handle provided to Ultravisor call cmd %x\n", cmd);
	cc = uv_call_sched(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	return cc ? -EINVAL : 0;
}

struct uv_info {
	unsigned long inst_calls_list[4];
	unsigned long uv_base_stor_len;
	unsigned long guest_base_stor_len;
	unsigned long guest_virt_base_stor_len;
	unsigned long guest_virt_var_stor_len;
	unsigned long guest_cpu_stor_len;
	unsigned long max_sec_stor_addr;
	unsigned int max_num_sec_conf;
	unsigned short max_guest_cpu_id;
	unsigned long uv_feature_indications;
};

extern struct uv_info uv_info;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
extern int prot_virt_guest;

static inline int is_prot_virt_guest(void)
{
	return prot_virt_guest;
}

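/*
 * Issue a set or remove shared access UV call for the page at @addr,
 * depending on @cmd; only meaningful for protected virtualization guests.
 */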
static inline int share(unsigned long addr, u16 cmd)
{
	struct uv_cb_share uvcb = {
		.header.cmd = cmd,
		.header.len = sizeof(uvcb),
		.paddr = addr
	};

	if (!is_prot_virt_guest())
		return -EOPNOTSUPP;
	/*
	 * Sharing is page wise, if we encounter addresses that are
	 * not page aligned, we assume something went wrong. If
	 * malloced structs are passed to this function, we could leak
	 * data to the hypervisor.
	 */
	BUG_ON(addr & ~PAGE_MASK);

	if (!uv_call(0, (u64)&uvcb))
		return 0;
	return -EINVAL;
}

/*
 * Guest 2 request to the Ultravisor to make a page shared with the
 * hypervisor for IO.
 *
 * @addr: Real or absolute address of the page to be shared
 */
static inline int uv_set_shared(unsigned long addr)
{
	return share(addr, UVC_CMD_SET_SHARED_ACCESS);
}

/*
 * Guest 2 request to the Ultravisor to make a page unshared.
 *
 * @addr: Real or absolute address of the page to be unshared
 */
static inline int uv_remove_shared(unsigned long addr)
{
	return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
}

#else
#define is_prot_virt_guest() 0
static inline int uv_set_shared(unsigned long addr) { return 0; }
static inline int uv_remove_shared(unsigned long addr) { return 0; }
#endif

#if IS_ENABLED(CONFIG_KVM)
extern int prot_virt_host;

static inline int is_prot_virt_host(void)
{
	return prot_virt_host;
}

int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
int uv_destroy_page(unsigned long paddr);
int uv_convert_from_secure(unsigned long paddr);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);

void setup_uv(void);
void adjust_to_uv_max(unsigned long *vmax);
#else
#define is_prot_virt_host() 0
static inline void setup_uv(void) {}
static inline void adjust_to_uv_max(unsigned long *vmax) {}

static inline int uv_destroy_page(unsigned long paddr)
{
	return 0;
}

static inline int uv_convert_from_secure(unsigned long paddr)
{
	return 0;
}
#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
void uv_query_info(void);
#else
static inline void uv_query_info(void) {}
#endif

#endif /* _ASM_S390_UV_H */