/******************************************************************************
 * xen.h
 *
 * Guest OS interface to Xen.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Copyright (c) 2004, K A Fraser
 */

#ifndef __XEN_PUBLIC_XEN_H__
#define __XEN_PUBLIC_XEN_H__

#include <asm/xen/interface.h>

/*
 * XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS).
 */

/*
 * x86_32: EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5.
 *         EAX = return value
 *         (argument registers may be clobbered on return)
 * x86_64: RAX = vector; RDI, RSI, RDX, R10, R8, R9 = args 1, 2, 3, 4, 5, 6.
 *         RAX = return value
 *         (argument registers not clobbered on return; RCX, R11 are)
 */
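
/*
 * Illustrative sketch (not part of this interface): a PV guest normally
 * enters Xen through a hypercall page provided at boot, one 32-byte stub per
 * vector below, and real guests use generated wrappers (e.g. Linux's
 * HYPERVISOR_xxx() inlines) rather than open-coding this. With the x86_64
 * register convention above, a two-argument call could be issued roughly as
 * follows (assumed helper and symbol names, shown only to make the register
 * usage concrete):
 *
 *   extern char hypercall_page[];            // filled in by Xen at boot
 *
 *   static inline long xen_hypercall2(unsigned int vector,
 *                                     unsigned long a1, unsigned long a2)
 *   {
 *       long ret;
 *       register unsigned long _a1 asm("rdi") = a1;
 *       register unsigned long _a2 asm("rsi") = a2;
 *
 *       asm volatile("call *%[stub]"
 *                    : "=a" (ret), "+r" (_a1), "+r" (_a2)
 *                    : [stub] "r" (&hypercall_page[vector * 32])
 *                    : "rcx", "r11", "memory");  // per the clobber note above
 *       return ret;
 *   }
 */
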
#define __HYPERVISOR_set_trap_table 0
#define __HYPERVISOR_mmu_update 1
#define __HYPERVISOR_set_gdt 2
#define __HYPERVISOR_stack_switch 3
#define __HYPERVISOR_set_callbacks 4
#define __HYPERVISOR_fpu_taskswitch 5
#define __HYPERVISOR_sched_op_compat 6
#define __HYPERVISOR_platform_op 7
#define __HYPERVISOR_set_debugreg 8
#define __HYPERVISOR_get_debugreg 9
#define __HYPERVISOR_update_descriptor 10
#define __HYPERVISOR_memory_op 12
#define __HYPERVISOR_multicall 13
#define __HYPERVISOR_update_va_mapping 14
#define __HYPERVISOR_set_timer_op 15
#define __HYPERVISOR_event_channel_op_compat 16
#define __HYPERVISOR_xen_version 17
#define __HYPERVISOR_console_io 18
#define __HYPERVISOR_physdev_op_compat 19
#define __HYPERVISOR_grant_table_op 20
#define __HYPERVISOR_vm_assist 21
#define __HYPERVISOR_update_va_mapping_otherdomain 22
#define __HYPERVISOR_iret 23 /* x86 only */
#define __HYPERVISOR_vcpu_op 24
#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
#define __HYPERVISOR_mmuext_op 26
#define __HYPERVISOR_xsm_op 27
#define __HYPERVISOR_nmi_op 28
#define __HYPERVISOR_sched_op 29
#define __HYPERVISOR_callback_op 30
#define __HYPERVISOR_xenoprof_op 31
#define __HYPERVISOR_event_channel_op 32
#define __HYPERVISOR_physdev_op 33
#define __HYPERVISOR_hvm_op 34
#define __HYPERVISOR_sysctl 35
#define __HYPERVISOR_domctl 36
#define __HYPERVISOR_kexec_op 37
#define __HYPERVISOR_tmem_op 38
#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
#define __HYPERVISOR_xenpmu_op 40
#define __HYPERVISOR_dm_op 41

/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
#define __HYPERVISOR_arch_1 49
#define __HYPERVISOR_arch_2 50
#define __HYPERVISOR_arch_3 51
#define __HYPERVISOR_arch_4 52
#define __HYPERVISOR_arch_5 53
#define __HYPERVISOR_arch_6 54
#define __HYPERVISOR_arch_7 55

/*
 * VIRTUAL INTERRUPTS
 *
 * Virtual interrupts that a guest OS may receive from Xen.
 * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
 * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
 * The latter can be allocated only once per guest: they must initially be
 * allocated to VCPU0 but can subsequently be re-bound.
 */
#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */
#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */
#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */
#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */
#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */
#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */
#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */
#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */
#define VIRQ_PCPU_STATE 9 /* G. (DOM0) PCPU state changed */
#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occurred */
#define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient */
#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */
#define VIRQ_XENPMU 13 /* PMC interrupt */

/* Architecture-specific VIRQ definitions. */
#define VIRQ_ARCH_0 16
#define VIRQ_ARCH_1 17
#define VIRQ_ARCH_2 18
#define VIRQ_ARCH_3 19
#define VIRQ_ARCH_4 20
#define VIRQ_ARCH_5 21
#define VIRQ_ARCH_6 22
#define VIRQ_ARCH_7 23

#define NR_VIRQS 24
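
/*
 * Illustrative sketch (not part of this header): a guest typically binds one
 * of the VIRQs above to an event channel with EVTCHNOP_bind_virq. Assuming
 * the definitions from the companion event_channel.h interface and a
 * Linux-style HYPERVISOR_event_channel_op() wrapper, binding the per-VCPU
 * timer VIRQ on VCPU 0 could look like:
 *
 *   struct evtchn_bind_virq bind = {
 *       .virq = VIRQ_TIMER,   // per-VCPU ('V.') source, bound once per VCPU
 *       .vcpu = 0,
 *   };
 *
 *   if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind) == 0)
 *       setup_timer_event_handler(bind.port);  // hypothetical guest helper
 */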

/*
 * enum neg_errnoval HYPERVISOR_mmu_update(const struct mmu_update reqs[],
 *                                         unsigned count, unsigned *done_out,
 *                                         unsigned foreigndom)
 * @reqs is an array of mmu_update_t structures ((ptr, val) pairs).
 * @count is the length of the above array.
 * @pdone is an output parameter indicating number of completed operations
 * @foreigndom[15:0]: FD, the expected owner of data pages referenced in this
 *                    hypercall invocation. Can be DOMID_SELF.
 * @foreigndom[31:16]: PFD, the expected owner of pagetable pages referenced
 *                     in this hypercall invocation. The value of this field
 *                     (x) encodes the PFD as follows:
 *                     x == 0 => PFD == DOMID_SELF
 *                     x != 0 => PFD == x - 1
 *
 * Sub-commands: ptr[1:0] specifies the appropriate MMU_* command.
 * -------------
 * ptr[1:0] == MMU_NORMAL_PT_UPDATE:
 * Updates an entry in a page table belonging to PFD. If updating an L1 table,
 * and the new table entry is valid/present, the mapped frame must belong to
 * FD. If attempting to map an I/O page then the caller assumes the privilege
 * of the FD.
 * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
 * FD == DOMID_XEN: Map restricted areas of Xen's heap space.
 * ptr[:2]  -- Machine address of the page-table entry to modify.
 * val      -- Value to write.
 *
 * There are also certain implicit requirements when using this hypercall. The
 * pages that make up a pagetable must be mapped read-only in the guest.
 * This prevents uncontrolled guest updates to the pagetable. Xen strictly
 * enforces this, and will disallow any pagetable update which would end up
 * mapping a pagetable page RW, and will disallow using any writable page as a
 * pagetable. In practice it means that when constructing a page table for a
 * process, thread, etc, we MUST be very diligent in following these rules:
 *  1). Start with the top-level page (PGD or in Xen language: L4). Fill out
 *      the entries.
 *  2). Keep on going, filling out the upper (PUD or L3), and middle (PMD
 *      or L2).
 *  3). Start filling out the PTE table (L1) with the PTE entries. Once
 *      done, make sure to set each of those entries to RO (so the writeable
 *      bit is unset). Once that has been completed, set the PMD (L2) for this
 *      PTE table as RO.
 *  4). When completed with all of the PMD (L2) entries, and all of them have
 *      been set to RO, make sure to set RO the PUD (L3). Do the same
 *      operation on PGD (L4) pagetable entries that have a PUD (L3) entry.
 *  5). Now before you can use those pages (so setting the cr3), you MUST also
 *      pin them so that the hypervisor can verify the entries. This is done
 *      via the HYPERVISOR_mmuext_op(MMUEXT_PIN_L4_TABLE, guest physical frame
 *      number of the PGD (L4)). At this point the HYPERVISOR_mmuext_op(
 *      MMUEXT_NEW_BASEPTR, guest physical frame number of the PGD (L4)) can be
 *      issued.
 * For 32-bit guests, the L4 is not used (as there are fewer pagetable levels),
 * so instead use the L3.
 * At this point the pagetables can be modified using the MMU_NORMAL_PT_UPDATE
 * hypercall. Also, if so desired, the OS can try to write to the PTE
 * and be trapped by the hypervisor (as the PTE entry is RO).
 *
 * To deallocate the pages, the operations are the reverse of the steps
 * mentioned above. The argument is MMUEXT_UNPIN_TABLE for all levels and the
 * pagetable MUST not be in use (meaning that the cr3 is not set to it).
 *
 * ptr[1:0] == MMU_MACHPHYS_UPDATE:
 * Updates an entry in the machine->pseudo-physical mapping table.
 * ptr[:2]  -- Machine address within the frame whose mapping to modify.
 *             The frame must belong to the FD, if one is specified.
 * val      -- Value to write into the mapping entry.
 *
 * ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD:
 * As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed
 * with those in @val.
 *
 * @val is usually the machine frame number along with some attributes.
 * The attributes by default follow the architecture-defined bits. Meaning that
 * if this is an x86_64 machine and the four-level page table layout is used,
 * the layout of val is:
 * - 63 if set means No execute (NX)
 * - 46-13 the machine frame number
 * - 12 available for guest
 * - 11 available for guest
 * - 10 available for guest
 * - 9 available for guest
 * - 8 global
 * - 7 PAT (PSE is disabled, must use hypercall to make 4MB or 2MB pages)
 * - 6 dirty
 * - 5 accessed
 * - 4 page cache disabled
 * - 3 page write through
 * - 2 userspace accessible
 * - 1 writeable
 * - 0 present
 *
 * The one bit that does not fit with the default layout is PAGE_PSE (also
 * called PAGE_PAT). The MMUEXT_[UN]MARK_SUPER arguments to the
 * HYPERVISOR_mmuext_op serve as a mechanism to set a pagetable to be 4MB
 * (or 2MB) instead of using the PAGE_PSE bit.
 *
 * The reason that the PAGE_PSE (bit 7) is not being utilized is due to Xen
 * using it as the Page Attribute Table (PAT) bit - for details on it please
 * refer to Intel SDM 10.12. The PAT allows setting the caching attributes of
 * pages instead of using MTRRs.
 *
 * The PAT MSR is as follows (it is a 64-bit value, each entry is 8 bits):
 *                      PAT4                   PAT0
 *   +-----+-----+----+----+----+-----+----+----+
 *   | UC  | UC- | WC | WB | UC | UC- | WC | WB |  <= Linux
 *   +-----+-----+----+----+----+-----+----+----+
 *   | UC  | UC- | WT | WB | UC | UC- | WT | WB |  <= BIOS (default when machine boots)
 *   +-----+-----+----+----+----+-----+----+----+
 *   | rsv | rsv | WP | WC | UC | UC- | WT | WB |  <= Xen
 *   +-----+-----+----+----+----+-----+----+----+
 *
 * The lookup of this index table translates to looking up
 * Bit 7, Bit 4, and Bit 3 of the val entry:
 *
 * PAT/PSE (bit 7) ... PCD (bit 4) .. PWT (bit 3).
 *
 * If all bits are off, then we are using PAT0. If bit 3 is turned on,
 * then we are using PAT1; if bits 3 and 4 are on, then PAT2..
 *
 * As you can see, the Linux PAT1 translates to PAT4 under Xen, which means
 * that a guest that follows Linux's PAT setup and would like to set Write
 * Combined on pages MUST use the PAT4 entry, i.e. Bit 7 (PAGE_PAT) must be
 * set. Linux only uses PAT0, PAT1, and PAT2 for caching, as follows:
 *
 *    WB = none (so PAT0)
 *    WC = PWT (bit 3 on)
 *    UC = PWT | PCD (bit 3 and 4 are on).
 *
 * To make it work with Xen, it needs to translate the WC bit as so:
 *
 *  PWT (so bit 3 on) --> PAT (so bit 7 is on) and clear bit 3
 *
 * And to translate back it would:
 *
 * PAT (bit 7 on) --> PWT (bit 3 on) and clear bit 7.
 */
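
/*
 * Illustrative sketch of the WC translation described above. The _PAGE_*
 * macros below are hypothetical (they mirror the x86 PTE bit numbers listed
 * above but are not defined in this header):
 *
 *   #define _PAGE_PWT (1UL << 3)
 *   #define _PAGE_PCD (1UL << 4)
 *   #define _PAGE_PAT (1UL << 7)
 *
 *   // Linux encoding -> Xen encoding: WC is PWT-only under Linux (PAT1)
 *   // but lives in PAT4 under Xen, i.e. bit 7 set, bit 3 clear.
 *   static inline unsigned long pte_flags_to_xen(unsigned long f)
 *   {
 *       if ((f & (_PAGE_PWT | _PAGE_PCD)) == _PAGE_PWT)
 *           f = (f & ~_PAGE_PWT) | _PAGE_PAT;
 *       return f;
 *   }
 *
 *   // ...and the reverse when reading a PTE back:
 *   static inline unsigned long pte_flags_from_xen(unsigned long f)
 *   {
 *       if (f & _PAGE_PAT)
 *           f = (f & ~_PAGE_PAT) | _PAGE_PWT;
 *       return f;
 *   }
 */
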
#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
#define MMU_PT_UPDATE_PRESERVE_AD 2 /* atomically: *ptr = val | (*ptr&(A|D)) */
#define MMU_PT_UPDATE_NO_TRANSLATE 3 /* checked '*ptr = val'. ptr is MA. */
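
/*
 * Illustrative use (a sketch, assuming a Linux-style wrapper
 *   int HYPERVISOR_mmu_update(struct mmu_update *req, int count,
 *                             int *success_count, domid_t domid);
 * the machine address and frame number below are placeholders):
 *
 *   struct mmu_update req = {
 *       // ptr[1:0] selects the sub-command; the rest is the PTE's machine
 *       // address, whose low two bits are free due to alignment.
 *       .ptr = pte_machine_addr | MMU_NORMAL_PT_UPDATE,
 *       .val = (mfn << 12) | 0x63,   // present, RW, accessed, dirty
 *   };
 *   int done = 0;
 *
 *   if (HYPERVISOR_mmu_update(&req, 1, &done, DOMID_SELF) < 0)
 *       handle_error();              // hypothetical error path
 */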

/*
 * MMU EXTENDED OPERATIONS
 *
 * enum neg_errnoval HYPERVISOR_mmuext_op(mmuext_op_t uops[],
 *                                        unsigned int count,
 *                                        unsigned int *pdone,
 *                                        unsigned int foreigndom)
 */
/* HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
 * A foreigndom (FD) can be specified (or DOMID_SELF for none).
 * Where the FD has some effect, it is described below.
 *
 * cmd: MMUEXT_(UN)PIN_*_TABLE
 * mfn: Machine frame number to be (un)pinned as a p.t. page.
 *      The frame must belong to the FD, if one is specified.
 *
 * cmd: MMUEXT_NEW_BASEPTR
 * mfn: Machine frame number of new page-table base to install in MMU.
 *
 * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
 * mfn: Machine frame number of new page-table base to install in MMU
 *      when in user space.
 *
 * cmd: MMUEXT_TLB_FLUSH_LOCAL
 * No additional arguments. Flushes local TLB.
 *
 * cmd: MMUEXT_INVLPG_LOCAL
 * linear_addr: Linear address to be flushed from the local TLB.
 *
 * cmd: MMUEXT_TLB_FLUSH_MULTI
 * vcpumask: Pointer to bitmap of VCPUs to be flushed.
 *
 * cmd: MMUEXT_INVLPG_MULTI
 * linear_addr: Linear address to be flushed.
 * vcpumask: Pointer to bitmap of VCPUs to be flushed.
 *
 * cmd: MMUEXT_TLB_FLUSH_ALL
 * No additional arguments. Flushes all VCPUs' TLBs.
 *
 * cmd: MMUEXT_INVLPG_ALL
 * linear_addr: Linear address to be flushed from all VCPUs' TLBs.
 *
 * cmd: MMUEXT_FLUSH_CACHE
 * No additional arguments. Writes back and flushes cache contents.
 *
 * cmd: MMUEXT_FLUSH_CACHE_GLOBAL
 * No additional arguments. Writes back and flushes cache contents
 * on all CPUs in the system.
 *
 * cmd: MMUEXT_SET_LDT
 * linear_addr: Linear address of LDT base (NB. must be page-aligned).
 * nr_ents: Number of entries in LDT.
 *
 * cmd: MMUEXT_CLEAR_PAGE
 * mfn: Machine frame number to be cleared.
 *
 * cmd: MMUEXT_COPY_PAGE
 * mfn: Machine frame number of the destination page.
 * src_mfn: Machine frame number of the source page.
 *
 * cmd: MMUEXT_[UN]MARK_SUPER
 * mfn: Machine frame number of head of superpage to be [un]marked.
 */
#define MMUEXT_PIN_L1_TABLE 0
#define MMUEXT_PIN_L2_TABLE 1
#define MMUEXT_PIN_L3_TABLE 2
#define MMUEXT_PIN_L4_TABLE 3
#define MMUEXT_UNPIN_TABLE 4
#define MMUEXT_NEW_BASEPTR 5
#define MMUEXT_TLB_FLUSH_LOCAL 6
#define MMUEXT_INVLPG_LOCAL 7
#define MMUEXT_TLB_FLUSH_MULTI 8
#define MMUEXT_INVLPG_MULTI 9
#define MMUEXT_TLB_FLUSH_ALL 10
#define MMUEXT_INVLPG_ALL 11
#define MMUEXT_FLUSH_CACHE 12
#define MMUEXT_SET_LDT 13
#define MMUEXT_NEW_USER_BASEPTR 15
#define MMUEXT_CLEAR_PAGE 16
#define MMUEXT_COPY_PAGE 17
#define MMUEXT_FLUSH_CACHE_GLOBAL 18
#define MMUEXT_MARK_SUPER 19
#define MMUEXT_UNMARK_SUPER 20

#ifndef __ASSEMBLY__
struct mmuext_op {
        unsigned int cmd;
        union {
                /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
                 * CLEAR_PAGE, COPY_PAGE, [UN]MARK_SUPER */
                xen_pfn_t mfn;
                /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
                unsigned long linear_addr;
        } arg1;
        union {
                /* SET_LDT */
                unsigned int nr_ents;
                /* TLB_FLUSH_MULTI, INVLPG_MULTI */
                void *vcpumask;
                /* COPY_PAGE */
                xen_pfn_t src_mfn;
        } arg2;
};
DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
#endif
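
/*
 * Illustrative sketch of the pin-then-switch sequence described in the
 * MMU_NORMAL_PT_UPDATE comment above (assuming a Linux-style wrapper
 *   int HYPERVISOR_mmuext_op(struct mmuext_op *op, int count,
 *                            int *success_count, domid_t domid);
 * and a 64-bit guest whose new top-level table lives at machine frame 'mfn'):
 *
 *   struct mmuext_op ops[2] = {
 *       { .cmd = MMUEXT_PIN_L4_TABLE, .arg1.mfn = mfn },  // validate & pin
 *       { .cmd = MMUEXT_NEW_BASEPTR,  .arg1.mfn = mfn },  // install as CR3
 *   };
 *   int done = 0;
 *
 *   if (HYPERVISOR_mmuext_op(ops, 2, &done, DOMID_SELF) < 0 || done != 2)
 *       handle_error();   // hypothetical error path
 */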

/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */
/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */
#define UVMF_NONE (0UL<<0) /* No flushing at all. */
#define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */
#define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */
#define UVMF_FLUSHTYPE_MASK (3UL<<0)
#define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */
#define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */
#define UVMF_ALL (1UL<<2) /* Flush all TLBs. */
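
/*
 * Illustrative use (a sketch, assuming a Linux-style wrapper
 *   int HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
 *                                    unsigned long flags);
 * with mfn_pte()/PAGE_KERNEL as placeholder PTE-construction helpers):
 * remap one virtual address and flush just that entry from the local TLB.
 *
 *   pte_t pte = mfn_pte(mfn, PAGE_KERNEL);
 *
 *   if (HYPERVISOR_update_va_mapping(vaddr, pte, UVMF_INVLPG | UVMF_LOCAL))
 *       handle_error();   // hypothetical error path
 */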

/*
 * Commands to HYPERVISOR_console_io().
 */
#define CONSOLEIO_write 0
#define CONSOLEIO_read 1
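
/*
 * Illustrative use (a sketch, assuming a Linux-style wrapper
 *   int HYPERVISOR_console_io(int cmd, int count, char *str);
 * ): write a message to the emergency console, e.g. very early in boot.
 *
 *   static const char msg[] = "guest: early hello\n";
 *
 *   HYPERVISOR_console_io(CONSOLEIO_write, sizeof(msg) - 1, (char *)msg);
 */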

/*
 * Commands to HYPERVISOR_vm_assist().
 */
#define VMASST_CMD_enable 0
#define VMASST_CMD_disable 1

/* x86/32 guests: simulate full 4GB segment limits. */
#define VMASST_TYPE_4gb_segments 0

/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */
#define VMASST_TYPE_4gb_segments_notify 1

/*
 * x86 guests: support writes to bottom-level PTEs.
 * NB1. Page-directory entries cannot be written.
 * NB2. Guest must continue to remove all writable mappings of PTEs.
 */
#define VMASST_TYPE_writable_pagetables 2

/* x86/PAE guests: support PDPTs above 4GB. */
#define VMASST_TYPE_pae_extended_cr3 3

/*
 * x86 guests: Sane behaviour for virtual iopl
 *  - virtual iopl updated from do_iret() hypercalls.
 *  - virtual iopl reported in bounce frames.
 *  - guest kernels assumed to be level 0 for the purpose of iopl checks.
 */
#define VMASST_TYPE_architectural_iopl 4

/*
 * All guests: activate update indicator in vcpu_runstate_info
 * Enable setting the XEN_RUNSTATE_UPDATE flag in guest memory mapped
 * vcpu_runstate_info during updates of the runstate information.
 */
#define VMASST_TYPE_runstate_update_flag 5

#define MAX_VMASST_TYPE 5
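
/*
 * Illustrative use (a sketch, assuming a Linux-style wrapper
 *   int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
 * ): a guest that wants the XEN_RUNSTATE_UPDATE protocol opts in with
 *
 *   if (HYPERVISOR_vm_assist(VMASST_CMD_enable,
 *                            VMASST_TYPE_runstate_update_flag))
 *       ;   // older hypervisors may not support this type; fall back
 */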

#ifndef __ASSEMBLY__

typedef uint16_t domid_t;

/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
#define DOMID_FIRST_RESERVED (0x7FF0U)

/* DOMID_SELF is used in certain contexts to refer to oneself. */
#define DOMID_SELF (0x7FF0U)

/*
 * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
 * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
 * is useful to ensure that no mappings to the OS's own heap are accidentally
 * installed. (e.g., in Linux this could cause havoc as reference counts
 * aren't adjusted on the I/O-mapping code path).
 * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
 * be specified by any calling domain.
 */
#define DOMID_IO (0x7FF1U)

/*
 * DOMID_XEN is used to allow privileged domains to map restricted parts of
 * Xen's heap space (e.g., the machine_to_phys table).
 * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
 * the caller is privileged.
 */
#define DOMID_XEN (0x7FF2U)

/* DOMID_COW is used as the owner of sharable pages */
#define DOMID_COW (0x7FF3U)

/* DOMID_INVALID is used to identify pages with unknown owner. */
#define DOMID_INVALID (0x7FF4U)

/* Idle domain. */
#define DOMID_IDLE (0x7FFFU)

/*
 * Send an array of these to HYPERVISOR_mmu_update().
 * NB. The fields are natural pointer/address size for this architecture.
 */
struct mmu_update {
        uint64_t ptr;       /* Machine address of PTE. */
        uint64_t val;       /* New contents of PTE.    */
};
DEFINE_GUEST_HANDLE_STRUCT(mmu_update);

/*
 * Send an array of these to HYPERVISOR_multicall().
 * NB. The fields are logically the natural register size for this
 * architecture. In cases where xen_ulong_t is larger than this then
 * any unused bits in the upper portion must be zero.
 */
struct multicall_entry {
        xen_ulong_t op;
        xen_long_t result;
        xen_ulong_t args[6];
};
DEFINE_GUEST_HANDLE_STRUCT(multicall_entry);
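
/*
 * Illustrative use (a sketch, assuming a Linux-style wrapper
 *   int HYPERVISOR_multicall(struct multicall_entry *call_list,
 *                            uint32_t nr_calls);
 * with va1/va2/pteval1/pteval2 as placeholders): batch two PTE updates into a
 * single guest/hypervisor transition. Each entry's 'result' field holds that
 * call's own return value afterwards.
 *
 *   // On a 64-bit guest update_va_mapping takes (va, new_pte_val, flags):
 *   struct multicall_entry mc[2] = {
 *       { .op   = __HYPERVISOR_update_va_mapping,
 *         .args = { va1, pteval1, UVMF_NONE } },
 *       { .op   = __HYPERVISOR_update_va_mapping,
 *         .args = { va2, pteval2, UVMF_INVLPG | UVMF_LOCAL } },
 *   };
 *
 *   if (HYPERVISOR_multicall(mc, 2) || mc[0].result || mc[1].result)
 *       handle_error();   // hypothetical error path
 */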

struct vcpu_time_info {
        /*
         * Updates to the following values are preceded and followed
         * by an increment of 'version'. The guest can therefore
         * detect updates by looking for changes to 'version'. If the
         * least-significant bit of the version number is set then an
         * update is in progress and the guest must wait to read a
         * consistent set of values. The correct way to interact with
         * the version number is similar to Linux's seqlock: see the
         * implementations of read_seqbegin/read_seqretry.
         */
        uint32_t version;
        uint32_t pad0;
        uint64_t tsc_timestamp;   /* TSC at last update of time vals. */
        uint64_t system_time;     /* Time, in nanosecs, since boot.   */
        /*
         * Current system time:
         *   system_time + ((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul
         * CPU frequency (Hz):
         *   ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
         */
        uint32_t tsc_to_system_mul;
        int8_t tsc_shift;
        int8_t pad1[3];
}; /* 32 bytes */
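
/*
 * Illustrative sketch of the seqlock-style read protocol and the formula
 * above (the rdtsc()/rmb() helpers stand in for the architecture's own
 * primitives, and the 128-bit intermediate is GCC-specific; tsc_to_system_mul
 * is a 32.32 fixed-point multiplier, hence the final >> 32):
 *
 *   static uint64_t read_system_time_ns(const volatile struct vcpu_time_info *t)
 *   {
 *       uint32_t ver;
 *       uint64_t delta, time;
 *
 *       do {
 *           ver = t->version;                 // odd => update in progress
 *           rmb();
 *           delta = rdtsc() - t->tsc_timestamp;
 *           if (t->tsc_shift >= 0)
 *               delta <<= t->tsc_shift;
 *           else
 *               delta >>= -t->tsc_shift;
 *           time = t->system_time +
 *                  (uint64_t)(((unsigned __int128)delta *
 *                              t->tsc_to_system_mul) >> 32);
 *           rmb();
 *       } while ((ver & 1) || ver != t->version);
 *
 *       return time;
 *   }
 */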

struct vcpu_info {
        /*
         * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
         * a pending notification for a particular VCPU. It is then cleared
         * by the guest OS /before/ checking for pending work, thus avoiding
         * a set-and-check race. Note that the mask is only accessed by Xen
         * on the CPU that is currently hosting the VCPU. This means that the
         * pending and mask flags can be updated by the guest without special
         * synchronisation (i.e., no need for the x86 LOCK prefix).
         * This may seem suboptimal because if the pending flag is set by
         * a different CPU then an IPI may be scheduled even when the mask
         * is set. However, note:
         *  1. The task of 'interrupt holdoff' is covered by the per-event-
         *     channel mask bits. A 'noisy' event that is continually being
         *     triggered can be masked at source at this very precise
         *     granularity.
         *  2. The main purpose of the per-VCPU mask is therefore to restrict
         *     reentrant execution: whether for concurrency control, or to
         *     prevent unbounded stack usage. Whatever the purpose, we expect
         *     that the mask will be asserted only for short periods at a time,
         *     and so the likelihood of a 'spurious' IPI is suitably small.
         * The mask is read before making an event upcall to the guest: a
         * non-zero mask therefore guarantees that the VCPU will not receive
         * an upcall activation. The mask is cleared when the VCPU requests
         * to block: this avoids wakeup-waiting races.
         */
        uint8_t evtchn_upcall_pending;
        uint8_t evtchn_upcall_mask;
        xen_ulong_t evtchn_pending_sel;
        struct arch_vcpu_info arch;
        struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */

/*
 * Xen/kernel shared data -- pointer provided in start_info.
 * NB. We expect that this struct is smaller than a page.
 */
struct shared_info {
        struct vcpu_info vcpu_info[MAX_VIRT_CPUS];

        /*
         * A domain can create "event channels" on which it can send and
         * receive asynchronous event notifications. There are three classes
         * of event that are delivered by this mechanism:
         *  1. Bi-directional inter- and intra-domain connections. Domains must
         *     arrange out-of-band to set up a connection (usually by allocating
         *     an unbound 'listener' port and advertising that via a storage
         *     service such as xenstore).
         *  2. Physical interrupts. A domain with suitable hardware-access
         *     privileges can bind an event-channel port to a physical interrupt
         *     source.
         *  3. Virtual interrupts ('events'). A domain can bind an event-channel
         *     port to a virtual interrupt source, such as the virtual-timer
         *     device or the emergency console.
         *
         * Event channels are addressed by a "port index". Each channel is
         * associated with two bits of information:
         *  1. PENDING -- notifies the domain that there is a pending notification
         *     to be processed. This bit is cleared by the guest.
         *  2. MASK -- if this bit is clear then a 0->1 transition of PENDING
         *     will cause an asynchronous upcall to be scheduled. This bit is only
         *     updated by the guest. It is read-only within Xen. If a channel
         *     becomes pending while the channel is masked then the 'edge' is lost
         *     (i.e., when the channel is unmasked, the guest must manually handle
         *     pending notifications as no upcall will be scheduled by Xen).
         *
         * To expedite scanning of pending notifications, any 0->1 pending
         * transition on an unmasked channel causes a corresponding bit in a
         * per-vcpu selector word to be set. Each bit in the selector covers a
         * 'C long' in the PENDING bitfield array.
         */
        xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];
        xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];

        /*
         * Wallclock time: updated only by control software. Guests should base
         * their gettimeofday() syscall on this wallclock-base value.
         */
        struct pvclock_wall_clock wc;

        struct arch_shared_info arch;

};
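
/*
 * Illustrative sketch of the two-level scan described above: consume the
 * per-VCPU selector word first, then the corresponding word of
 * evtchn_pending, skipping masked channels. The xchg()/clear_bit() helpers
 * stand in for the guest's own atomics (cross-domain access usually requires
 * locked/sync variants), and handle_event() is a hypothetical dispatcher:
 *
 *   void scan_pending_events(struct shared_info *s, struct vcpu_info *v)
 *   {
 *       xen_ulong_t sel = xchg(&v->evtchn_pending_sel, 0);
 *
 *       while (sel) {
 *           unsigned int word = __builtin_ctzl(sel);
 *           xen_ulong_t live;
 *
 *           sel &= sel - 1;                       // clear lowest set bit
 *           live = s->evtchn_pending[word] & ~s->evtchn_mask[word];
 *           while (live) {
 *               unsigned int bit  = __builtin_ctzl(live);
 *               unsigned int port = word * 8 * sizeof(xen_ulong_t) + bit;
 *
 *               live &= live - 1;
 *               clear_bit(bit, &s->evtchn_pending[word]);  // ack PENDING
 *               handle_event(port);
 *           }
 *       }
 *   }
 */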

/*
 * Start-of-day memory layout
 *
 *  1. The domain is started within a contiguous virtual-memory region.
 *  2. The contiguous region begins and ends on an aligned 4MB boundary.
 *  3. This is the order of bootstrap elements in the initial virtual region:
 *      a. relocated kernel image
 *      b. initial ram disk              [mod_start, mod_len]
 *         (may be omitted)
 *      c. list of allocated page frames [mfn_list, nr_pages]
 *         (unless relocated due to XEN_ELFNOTE_INIT_P2M)
 *      d. start_info_t structure        [register ESI (x86)]
 *         in case of dom0 this page contains the console info, too
 *      e. unless dom0: xenstore ring page
 *      f. unless dom0: console ring page
 *      g. bootstrap page tables         [pt_base, CR3 (x86)]
 *      h. bootstrap stack               [register ESP (x86)]
 *  4. Bootstrap elements are packed together, but each is 4kB-aligned.
 *  5. The list of page frames forms a contiguous 'pseudo-physical' memory
 *     layout for the domain. In particular, the bootstrap virtual-memory
 *     region is a 1:1 mapping to the first section of the pseudo-physical map.
 *  6. All bootstrap elements are mapped read-writable for the guest OS. The
 *     only exception is the bootstrap page table, which is mapped read-only.
 *  7. There is guaranteed to be at least 512kB padding after the final
 *     bootstrap element. If necessary, the bootstrap virtual region is
 *     extended by an extra 4MB to ensure this.
 */

#define MAX_GUEST_CMDLINE 1024
struct start_info {
        /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME.    */
        char magic[32];             /* "xen-<version>-<platform>".            */
        unsigned long nr_pages;     /* Total pages allocated to this domain.  */
        unsigned long shared_info;  /* MACHINE address of shared info struct. */
        uint32_t flags;             /* SIF_xxx flags.                         */
        xen_pfn_t store_mfn;        /* MACHINE page number of shared page.    */
        uint32_t store_evtchn;      /* Event channel for store communication. */
        union {
                struct {
                        xen_pfn_t mfn;      /* MACHINE page number of console page. */
                        uint32_t evtchn;    /* Event channel for console page.      */
                } domU;
                struct {
                        uint32_t info_off;  /* Offset of console_info struct.          */
                        uint32_t info_size; /* Size of console_info struct from start. */
                } dom0;
        } console;
        /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME).     */
        unsigned long pt_base;      /* VIRTUAL address of page directory.     */
        unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames.       */
        unsigned long mfn_list;     /* VIRTUAL address of page-frame list.    */
        unsigned long mod_start;    /* VIRTUAL address of pre-loaded module.  */
        unsigned long mod_len;      /* Size (bytes) of pre-loaded module.     */
        int8_t cmd_line[MAX_GUEST_CMDLINE];
        /* The pfn range here covers both page table and p->m table frames.   */
        unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table.    */
        unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table.  */
};

/* These flags are passed in the 'flags' field of start_info_t. */
#define SIF_PRIVILEGED (1<<0)      /* Is the domain privileged? */
#define SIF_INITDOMAIN (1<<1)      /* Is this the initial control domain? */
#define SIF_MULTIBOOT_MOD (1<<2)   /* Is mod_start a multiboot module? */
#define SIF_MOD_START_PFN (1<<3)   /* Is mod_start a PFN? */
#define SIF_VIRT_P2M_4TOOLS (1<<4) /* Do Xen tools understand a virt. mapped */
                                   /* P->M making the 3 level tree obsolete? */
#define SIF_PM_MASK (0xFF<<8)      /* reserve 1 byte for xen-pm options */
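
/*
 * Illustrative sketch of early start-of-day handling (the console-setup
 * helpers are hypothetical; on x86 the start_info pointer arrives in ESI/RSI
 * as noted in the layout above):
 *
 *   void parse_start_info(const struct start_info *si)
 *   {
 *       // magic is "xen-<version>-<platform>", e.g. "xen-3.0-x86_64"
 *       if (strncmp(si->magic, "xen-", 4) != 0)
 *           panic("not started by Xen");
 *
 *       if (si->flags & SIF_INITDOMAIN) {
 *           // dom0: console info lives inside the start_info page itself
 *           init_dom0_console((const char *)si + si->console.dom0.info_off,
 *                             si->console.dom0.info_size);
 *       } else {
 *           // domU: ring page + event channel for the PV console
 *           init_domU_console(si->console.domU.mfn, si->console.domU.evtchn);
 *       }
 *   }
 */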

/*
 * A multiboot module is a package containing modules very similar to a
 * multiboot module array.  The only differences are:
 *  - the array of module descriptors is, by convention, placed at the very
 *    beginning of the multiboot module;
 *  - addresses in the module descriptors are relative to the beginning of
 *    the multiboot module;
 *  - the number of modules is determined by a termination descriptor that
 *    has mod_start == 0.
 *
 * This allows the package both to be built statically and to be referenced
 * from a configuration file, and it lets the PV guest easily rebase the
 * addresses to virtual addresses while counting the number of modules at
 * the same time (an illustrative walker is sketched after the structure
 * below).
 */
struct xen_multiboot_mod_list {
        /* Address of first byte of the module */
        uint32_t mod_start;
        /* Address of last byte of the module (inclusive) */
        uint32_t mod_end;
        /* Address of zero-terminated command line */
        uint32_t cmdline;
        /* Unused, must be zero */
        uint32_t pad;
};
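
/*
 * Illustrative sketch only: walking the descriptor array described above.
 * The array sits at the start of the multiboot module, each descriptor's
 * addresses are relative to that start, and a descriptor with
 * mod_start == 0 terminates the list.  'package_virt' is a hypothetical
 * virtual address at which the guest has mapped the whole package.
 */
static inline unsigned int
xen_example_count_multiboot_mods(const void *package_virt)
{
        const struct xen_multiboot_mod_list *mod = package_virt;
        unsigned int count = 0;

        while (mod[count].mod_start != 0) {
                /*
                 * Rebasing: this module's first byte is at
                 * (const char *)package_virt + mod[count].mod_start.
                 */
                count++;
        }
        return count;
}
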
/*
 * The console structure in start_info.console.dom0
 *
 * This structure includes a variety of information required to
 * have a working VGA/VESA console.
 */
struct dom0_vga_console_info {
        uint8_t video_type;
#define XEN_VGATYPE_TEXT_MODE_3 0x03
#define XEN_VGATYPE_VESA_LFB    0x23
#define XEN_VGATYPE_EFI_LFB     0x70

        union {
                struct {
                        /* Font height, in pixels. */
                        uint16_t font_height;
                        /* Cursor location (column, row). */
                        uint16_t cursor_x, cursor_y;
                        /* Number of rows and columns (dimensions in characters). */
                        uint16_t rows, columns;
                } text_mode_3;

                struct {
                        /* Width and height, in pixels. */
                        uint16_t width, height;
                        /* Bytes per scan line. */
                        uint16_t bytes_per_line;
                        /* Bits per pixel. */
                        uint16_t bits_per_pixel;
                        /* LFB physical address, and size (in units of 64kB). */
                        uint32_t lfb_base;
                        uint32_t lfb_size;
                        /* RGB mask offsets and sizes, as defined by VBE 1.2+ */
                        uint8_t red_pos, red_size;
                        uint8_t green_pos, green_size;
                        uint8_t blue_pos, blue_size;
                        uint8_t rsvd_pos, rsvd_size;

                        /* VESA capabilities (offset 0xa, VESA command 0x4f00). */
                        uint32_t gbl_caps;
                        /* Mode attributes (offset 0x0, VESA command 0x4f01). */
                        uint16_t mode_attrs;
                } vesa_lfb;
        } u;
};
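
/*
 * Illustrative sketch only: consuming dom0_vga_console_info.  For the
 * linear-framebuffer types the red/green/blue position and size fields
 * describe the pixel layout, so a raw pixel value can be assembled as
 * below.  The helper name is hypothetical; colour components wider than
 * the mode advertises are simply truncated.
 */
static inline uint32_t
xen_example_make_pixel(const struct dom0_vga_console_info *info,
                       uint32_t r, uint32_t g, uint32_t b)
{
        if (info->video_type == XEN_VGATYPE_TEXT_MODE_3)
                return 0;       /* Text mode: no pixel format to build. */

        /* Mask each component to its advertised width, then shift into place. */
        r &= (1u << info->u.vesa_lfb.red_size) - 1;
        g &= (1u << info->u.vesa_lfb.green_size) - 1;
        b &= (1u << info->u.vesa_lfb.blue_size) - 1;
        return (r << info->u.vesa_lfb.red_pos) |
               (g << info->u.vesa_lfb.green_pos) |
               (b << info->u.vesa_lfb.blue_pos);
}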

typedef uint64_t cpumap_t;

typedef uint8_t xen_domain_handle_t[16];

/* Turn a plain number into a C unsigned long constant. */
#define __mk_unsigned_long(x) x ## UL
#define mk_unsigned_long(x) __mk_unsigned_long(x)
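
/*
 * Illustrative sketch only: mk_unsigned_long() lets a constant shared
 * between C and assembly carry the UL suffix when compiled as C, while the
 * __ASSEMBLY__ branch near the end of this header redefines it to leave
 * the bare number for the assembler.  XEN_EXAMPLE_MASK is hypothetical.
 */
#define XEN_EXAMPLE_MASK mk_unsigned_long(0xFF000000)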

#define TMEM_SPEC_VERSION 1

struct tmem_op {
        uint32_t cmd;
        int32_t pool_id;
        union {
                struct {                /* for cmd == TMEM_NEW_POOL */
                        uint64_t uuid[2];
                        uint32_t flags;
                } new;
                struct {
                        uint64_t oid[3];
                        uint32_t index;
                        uint32_t tmem_offset;
                        uint32_t pfn_offset;
                        uint32_t len;
                        GUEST_HANDLE(void) gmfn; /* guest machine page frame */
                } gen;
        } u;
};
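
/*
 * Illustrative sketch only: filling in a tmem_op for a "new pool" request.
 * The comment above refers to TMEM_NEW_POOL, but its numeric value is
 * defined elsewhere, so it is passed in as a parameter here; the helper
 * name and the choice of pool_id are likewise illustrative assumptions.
 */
static inline void
xen_example_fill_tmem_new_pool(struct tmem_op *op, uint32_t new_pool_cmd,
                               uint64_t uuid_lo, uint64_t uuid_hi,
                               uint32_t pool_flags)
{
        op->cmd = new_pool_cmd;         /* e.g. TMEM_NEW_POOL */
        op->pool_id = 0;                /* No pool exists yet; assumed ignored. */
        op->u.new.uuid[0] = uuid_lo;
        op->u.new.uuid[1] = uuid_hi;
        op->u.new.flags = pool_flags;
}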

DEFINE_GUEST_HANDLE(u64);

#else /* __ASSEMBLY__ */

/* In assembly code we cannot use C numeric constant suffixes. */
#define mk_unsigned_long(x) x

#endif /* !__ASSEMBLY__ */

#endif /* __XEN_PUBLIC_XEN_H__ */