Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (i.e., vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>
#include <../entry/calling.h>

/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with a single "and" operation.  If there are pending
 * events, then enter the hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preemption here doesn't matter because it will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_irq_enable_direct)
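
/*
 * Roughly, xen_irq_enable_direct above does the following (a C sketch,
 * not in the original source; struct vcpu_info field names as in
 * xen/interface/xen.h):
 *
 *	vcpu_info->evtchn_upcall_mask = 0;	// unmask event delivery
 *	if (vcpu_info->evtchn_upcall_pending)	// anything already queued?
 *		check_events();			// hypercall to flush it
 */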


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	ret
SYM_FUNC_END(xen_irq_disable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
SYM_FUNC_START(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah, %ah
	ret
SYM_FUNC_END(xen_save_fl_direct)
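
/*
 * Why setz/addb yields X86_EFLAGS_IF: %ah is bits 8-15 of %eax.  setz
 * sets %ah to 1 when the mask byte is zero (events enabled), i.e. bit 8
 * of %eax.  addb %ah, %ah doubles that to 2, moving the flag to bit 9,
 * and bit 9 of %eax is exactly X86_EFLAGS_IF (0x200).  If the mask was
 * set, %ah stays 0 and IF reads as clear.
 */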


/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte. After setting the
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
SYM_FUNC_START(xen_restore_fl_direct)
	FRAME_BEGIN
	testw $X86_EFLAGS_IF, %di
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Preemption here doesn't matter because it will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* check for unmasked and pending */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_restore_fl_direct)
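
/*
 * The cmpw above is a 16-bit compare starting at the pending byte, so
 * it checks two adjacent vcpu_info bytes at once: the low byte is
 * evtchn_upcall_pending and the high byte is evtchn_upcall_mask.  The
 * word equals 0x0001 only when an event is pending (1) and the mask is
 * clear (0), which is the one case where check_events must run.
 */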


/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
SYM_FUNC_START(check_events)
	FRAME_BEGIN
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	FRAME_END
	ret
SYM_FUNC_END(check_events)
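
/*
 * check_events saves every register the C ABI lets a callee clobber
 * because it is reached from pv-op fast paths (e.g. the irq enable and
 * restore_fl routines above) whose callers assume almost nothing is
 * clobbered; xen_force_evtchn_callback is an ordinary C function and
 * would otherwise trash the caller-saved set.
 */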

SYM_FUNC_START(xen_read_cr2)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2);

SYM_FUNC_START(xen_read_cr2_direct)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2_direct);

.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
	pop %rcx
	pop %r11
	jmp \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm
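
/*
 * Under Xen PV, exceptions are delivered by the hypervisor, which
 * pushes %rcx and %r11 on top of the usual hardware-style frame.  Each
 * stub strips those two extra words and jumps to the native handler;
 * for example, "xen_pv_trap asm_exc_int3" expands to roughly:
 *
 *	SYM_CODE_START(xen_asm_exc_int3)
 *		pop %rcx
 *		pop %r11
 *		jmp asm_exc_int3
 *	SYM_CODE_END(xen_asm_exc_int3)
 */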

xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback

	__INIT
SYM_CODE_START(xen_early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	pop %rcx
	pop %r11
	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
	i = i + 1
	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(xen_early_idt_handler_array)
	__FINIT
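
/*
 * The .fill pads each early-IDT stub with int3 (0xcc) up to the next
 * XEN_EARLY_IDT_HANDLER_SIZE boundary, so the array has fixed-size
 * entries that can be indexed by vector number, just like the native
 * early_idt_handler_array it forwards to.
 */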

hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
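/*
 * Each hypercall occupies a fixed 32-byte slot in the hypercall page
 * populated by Xen, so entry point N lives at hypercall_page + N * 32;
 * hypercall_iret is simply the slot for __HYPERVISOR_iret.
 */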
/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
SYM_CODE_START(xen_iret)
	pushq $0
	jmp hypercall_iret
SYM_CODE_END(xen_iret)
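
/*
 * The pushq $0 above supplies the "flags" word of the frame shown in
 * the diagram: xen_iret passes 0, while xen_sysret64 below passes
 * VGCF_in_syscall to tell Xen the guest is returning from a syscall.
 */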

SYM_CODE_START(xen_sysret64)
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back.
	 *
	 * tss.sp2 is scratch space.
	 */
	movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	pushq $__USER_DS
	pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	pushq %r11
	pushq $__USER_CS
	pushq %rcx

	pushq $VGCF_in_syscall
	jmp hypercall_iret
SYM_CODE_END(xen_sysret64)
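
/*
 * A 64-bit PV guest kernel runs in ring 3, so SYSRET itself cannot be
 * used; the syscall return is instead rebuilt as the iret frame
 * described above.  Per the SYSCALL ABI, %rcx holds the user %rip and
 * %r11 the user %rflags, which is why they land in the rip and rflags
 * slots of the frame.
 */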

/*
 * XEN pv doesn't use a trampoline stack; PER_CPU_VAR(cpu_tss_rw + TSS_sp0)
 * is also the kernel stack.  Reusing swapgs_restore_regs_and_return_to_usermode()
 * in XEN pv would move %rsp up to the top of the kernel stack and leave
 * the IRET frame below %rsp, where an #NMI arriving at that point could
 * corrupt it.  Besides, having swapgs_restore_regs_and_return_to_usermode()
 * push an IRET frame at the very address it already occupies would be
 * useless work.
 */
SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
	UNWIND_HINT_REGS
	POP_REGS

	/* stackleak_erase() can work safely on the kernel stack. */
	STACKLEAK_ERASE_NOCLOBBER

	addq	$8, %rsp	/* skip regs->orig_ax */
	jmp xen_iret
SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)
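
/*
 * POP_REGS restores the general-purpose registers saved in pt_regs;
 * the addq then discards the orig_ax slot, leaving exactly the
 * rip/cs/rflags/rsp/ss frame that xen_iret hands to HYPERVISOR_iret.
 */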

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 */

/* Normal 64-bit system call target */
SYM_FUNC_START(xen_syscall_target)
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER_CS, 1*8(%rsp)

	jmp entry_SYSCALL_64_after_hwframe
SYM_FUNC_END(xen_syscall_target)
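
/*
 * After the two pops, the frame matches the diagram above with rcx/r11
 * stripped: 0*8(%rsp) = rip, 1*8 = cs, 2*8 = rflags, 3*8 = rsp,
 * 4*8 = ss -- hence the 1*8 and 4*8 offsets used to overwrite CS and SS.
 */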

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_FUNC_START(xen_syscall32_target)
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSCALL_compat_after_hwframe
SYM_FUNC_END(xen_syscall32_target)

/* 32-bit compat sysenter target */
SYM_FUNC_START(xen_sysenter_target)
	/*
	 * NB: Xen is polite and clears TF from EFLAGS for us.  This means
	 * that we don't need to guard against single step exceptions here.
	 */
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSENTER_compat_after_hwframe
SYM_FUNC_END(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

SYM_FUNC_START_ALIAS(xen_syscall32_target)
SYM_FUNC_START(xen_sysenter_target)
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	pushq $0
	jmp hypercall_iret
SYM_FUNC_END(xen_sysenter_target)
SYM_FUNC_END_ALIAS(xen_syscall32_target)
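
/*
 * With compat support compiled out, both 32-bit entry points collapse
 * into one stub: drop the extra rcx/r11 words, put -ENOSYS in %rax as
 * the syscall result, and return straight to userspace through
 * HYPERVISOR_iret (the pushq $0 is the flags word, as in xen_iret).
 */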

#endif	/* CONFIG_IA32_EMULATION */