/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_X86_ENTRY_COMMON_H
#define _ASM_X86_ENTRY_COMMON_H

#include <linux/user-return-notifier.h>

#include <asm/nospec-branch.h>
#include <asm/io_bitmap.h>
#include <asm/fpu/api.h>

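/*
 * x86 implementations of the arch_* hooks consumed by the generic
 * entry/exit code (see <linux/entry-common.h> and kernel/entry/common.c).
 */
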
/* Check that the stack and regs on entry from user mode are sane. */
static __always_inline void arch_check_user_regs(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) {
		/*
		 * Make sure that the entry code gave us a sensible EFLAGS
		 * register.  Native because we want to check the actual CPU
		 * state, not the interrupt state as imagined by Xen.
		 */
		unsigned long flags = native_save_fl();
		unsigned long mask = X86_EFLAGS_DF | X86_EFLAGS_NT;

		/*
		 * For !SMAP hardware we patch out CLAC on entry.
		 */
		if (boot_cpu_has(X86_FEATURE_SMAP) ||
		    (IS_ENABLED(CONFIG_64BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
			mask |= X86_EFLAGS_AC;

		WARN_ON_ONCE(flags & mask);

		/* We think we came from user mode. Make sure pt_regs agrees. */
		WARN_ON_ONCE(!user_mode(regs));

		/*
		 * All entries from user mode (except #DF) should be on the
		 * normal thread stack and should have user pt_regs in the
		 * correct location.
		 */
		WARN_ON_ONCE(!on_thread_stack());
		WARN_ON_ONCE(regs != task_pt_regs(current));
	}
}
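/*
 * Redefining the name to itself tells the generic entry header that this
 * architecture provides its own arch_check_user_regs(), so the empty
 * generic fallback is not used.
 */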
#define arch_check_user_regs arch_check_user_regs

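/*
 * Extra TIF bits that the generic code folds into its syscall-exit work
 * mask: single-stepping needs x86-specific handling on syscall return.
 */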
#define ARCH_SYSCALL_EXIT_WORK (_TIF_SINGLESTEP)

static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work)
{
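	/* Let user-return notifiers (e.g. KVM's deferred MSR restore) run. */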
	if (ti_work & _TIF_USER_RETURN_NOTIFY)
		fire_user_return_notifiers();

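	/* Lazily sync the task's I/O permission bitmap into the TSS. */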
	if (unlikely(ti_work & _TIF_IO_BITMAP))
		tss_update_io_bitmap();

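	/*
	 * Check that the in-memory and in-register FPU state agree, then
	 * reload the user's FPU registers if they were saved to memory
	 * (_TIF_NEED_FPU_LOAD) while in the kernel.
	 */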
	fpregs_assert_state_consistent();
	if (unlikely(ti_work & _TIF_NEED_FPU_LOAD))
		switch_fpu_return();

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
	 * returning to user mode.  We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls.  The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_REGS_POKED_I386: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	current_thread_info()->status &= ~(TS_COMPAT | TS_I386_REGS_POKED);
#endif
}
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare

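/*
 * Last arch-specific step, run after all exit work has been processed and
 * right before the low-level return to user mode.
 */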
static __always_inline void arch_exit_to_user_mode(void)
{
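	/*
	 * MDS mitigation: flush CPU buffers before returning to user space,
	 * when the mitigation is enabled.
	 */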
	mds_user_clear_cpu_buffers();
}
#define arch_exit_to_user_mode arch_exit_to_user_mode

#endif