// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <linux/acpi.h>
#include <linux/elf-randomize.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>
#include <asm/io_bitmap.h>
#include <asm/proto.h>
#include <asm/frame.h>

#include "process.h"

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
	.x86_tss = {
		/*
		 * .sp0 is only used when entering ring 0 from a lower
		 * privilege level. Since the init task never runs anything
		 * but ring 0 code, there is no need for a valid value here.
		 * Poison it.
		 */
		.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,

		/*
		 * .sp1 is cpu_current_top_of_stack. The init task never
		 * runs user code, but cpu_current_top_of_stack should still
		 * be well defined before the first context switch.
		 */
		.sp1 = TOP_OF_INIT_STACK,

#ifdef CONFIG_X86_32
		.ss0 = __KERNEL_DS,
		.ss1 = __KERNEL_CS,
#endif
		.io_bitmap_base	= IO_BITMAP_OFFSET_INVALID,
	},
};
EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);

DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);

/*
 * This is called so that we can store lazy FPU state into memory and copy
 * the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif

	return fpu__copy(dst, src);
}

/*
 * Free thread data structures etc.
 */
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;
	struct fpu *fpu = &t->fpu;

	if (test_thread_flag(TIF_IO_BITMAP))
		io_bitmap_exit(tsk);

	free_vm86(t);

	fpu__drop(fpu);
}

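/*
 * Set up the TLS passed in via clone() for the child task: a GDT entry
 * for 32-bit callers, the FS base for native 64-bit callers.
 */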
static int set_new_tls(struct task_struct *p, unsigned long tls)
{
	struct user_desc __user *utls = (struct user_desc __user *)tls;

	if (in_ia32_syscall())
		return do_set_thread_area(p, -1, utls, 0);
	else
		return do_set_thread_area_64(p, ARCH_SET_FS, tls);
}

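/*
 * Set up the kernel stack and register state for a newly forked task.
 * Kernel threads get a zeroed pt_regs and a kthread frame; user tasks
 * inherit a copy of the parent's registers with a zeroed return value
 * and, if requested, a new stack pointer and TLS.
 */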
int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	struct inactive_task_frame *frame;
	struct fork_frame *fork_frame;
	struct pt_regs *childregs;
	int ret = 0;

	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;

	frame->bp = encode_frame_pointer(childregs);
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap = NULL;
	p->thread.iopl_warn = 0;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

#ifdef CONFIG_X86_64
	current_save_fsgs();
	p->thread.fsindex = current->thread.fsindex;
	p->thread.fsbase = current->thread.fsbase;
	p->thread.gsindex = current->thread.gsindex;
	p->thread.gsbase = current->thread.gsbase;

	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
#else
	p->thread.sp0 = (unsigned long) (childregs + 1);
	/*
	 * Clear all status flags including IF and set the fixed bit. 64-bit
	 * does not need this initialization as the frame does not contain
	 * flags. Flags consistency (especially vs. AC) is ensured there via
	 * objtool, which lacks 32-bit support.
	 */
	frame->flags = X86_EFLAGS_FIXED;
#endif

	/* Kernel thread? */
	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(childregs, 0, sizeof(struct pt_regs));
		kthread_frame_init(frame, sp, arg);
		return 0;
	}

	frame->bx = 0;
	*childregs = *current_pt_regs();
	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

#ifdef CONFIG_X86_32
	task_user_gs(p) = get_user_gs(current_pt_regs());
#endif

	/* Set a new TLS for the child thread? */
	if (clone_flags & CLONE_SETTLS)
		ret = set_new_tls(p, tls);

	if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
		io_bitmap_share(p);

	return ret;
}

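/*
 * Called on exec: drop any ptrace hardware breakpoints, clear the TLS
 * slots and reset the FPU state of the current task.
 */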
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__clear_all(&tsk->thread.fpu);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_set_bits(X86_CR4_TSD);
	preempt_enable();
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_clear_bits(X86_CR4_TSD);
	preempt_enable();
}

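/*
 * prctl(PR_GET_TSC) backend: report whether RDTSC is allowed
 * (PR_TSC_ENABLE) or traps with SIGSEGV (PR_TSC_SIGSEGV) for the
 * current task.
 */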
int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

DEFINE_PER_CPU(u64, msr_misc_features_shadow);

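/*
 * Program the CPUID faulting bit in MSR_MISC_FEATURES_ENABLES on the
 * current CPU and keep the per-CPU shadow copy of the MSR in sync.
 */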
static void set_cpuid_faulting(bool on)
{
	u64 msrval;

	msrval = this_cpu_read(msr_misc_features_shadow);
	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
	this_cpu_write(msr_misc_features_shadow, msrval);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
}

static void disable_cpuid(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(true);
	}
	preempt_enable();
}

static void enable_cpuid(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(false);
	}
	preempt_enable();
}

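/*
 * Backends for arch_prctl(ARCH_GET_CPUID / ARCH_SET_CPUID): report or
 * toggle CPUID faulting for the current task.
 */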
static int get_cpuid_mode(void)
{
	return !test_thread_flag(TIF_NOCPUID);
}

static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
{
	if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT))
		return -ENODEV;

	if (cpuid_enabled)
		enable_cpuid();
	else
		disable_cpuid();

	return 0;
}

/*
 * Called immediately after a successful exec.
 */
void arch_setup_new_exec(void)
{
	/* If cpuid was previously disabled for this task, re-enable it. */
	if (test_thread_flag(TIF_NOCPUID))
		enable_cpuid();

	/*
	 * Don't inherit TIF_SSBD across exec boundary when
	 * PR_SPEC_DISABLE_NOEXEC is used.
	 */
	if (test_thread_flag(TIF_SSBD) &&
	    task_spec_ssb_noexec(current)) {
		clear_thread_flag(TIF_SSBD);
		task_clear_spec_ssb_disable(current);
		task_clear_spec_ssb_noexec(current);
		speculation_ctrl_update(task_thread_info(current)->flags);
	}
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void switch_to_bitmap(unsigned long tifp)
{
	/*
	 * Invalidate I/O bitmap if the previous task used it. This prevents
	 * any possible leakage of an active I/O bitmap.
	 *
	 * If the next task has an I/O bitmap it will handle it on exit to
	 * user mode.
	 */
	if (tifp & _TIF_IO_BITMAP)
		tss_invalidate_io_bitmap();
}

static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
{
	/*
	 * Copy at least the byte range of the incoming task's bitmap which
	 * covers the permitted I/O ports.
	 *
	 * If the previous task which used an I/O bitmap had more bits
	 * permitted, then the copy needs to cover those as well so they
	 * get turned off.
	 */
	memcpy(tss->io_bitmap.bitmap, iobm->bitmap,
	       max(tss->io_bitmap.prev_max, iobm->max));

	/*
	 * Store the new max and the sequence number of this bitmap.
	 */
	tss->io_bitmap.prev_max = iobm->max;
	tss->io_bitmap.prev_sequence = iobm->sequence;
}

/**
 * native_tss_update_io_bitmap - Update I/O bitmap before exiting to usermode
 */
void native_tss_update_io_bitmap(void)
{
	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
	struct thread_struct *t = &current->thread;
	u16 *base = &tss->x86_tss.io_bitmap_base;

	if (!test_thread_flag(TIF_IO_BITMAP)) {
		native_tss_invalidate_io_bitmap();
		return;
	}

	if (IS_ENABLED(CONFIG_X86_IOPL_IOPERM) && t->iopl_emul == 3) {
		*base = IO_BITMAP_OFFSET_VALID_ALL;
	} else {
		struct io_bitmap *iobm = t->io_bitmap;

		/*
		 * Only copy bitmap data when the sequence number differs. The
		 * update time is accounted to the incoming task.
		 */
		if (tss->io_bitmap.prev_sequence != iobm->sequence)
			tss_copy_io_bitmap(tss, iobm);

		/* Enable the bitmap */
		*base = IO_BITMAP_OFFSET_VALID_MAP;
	}

	/*
	 * Make sure that the TSS limit is covering the IO bitmap. It might have
	 * been cut down by a VMEXIT to 0x67 which would cause a subsequent I/O
	 * access from user space to trigger a #GP because the bitmap is outside
	 * the TSS limit.
	 */
	refresh_tss_limit();
}
#else /* CONFIG_X86_IOPL_IOPERM */
static inline void switch_to_bitmap(unsigned long tifp) { }
#endif

#ifdef CONFIG_SMP

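/*
 * Per-CPU state used to coordinate the Speculative Store Bypass Disable
 * (SSBD) setting between HT siblings that share MSR_AMD64_LS_CFG.
 */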
struct ssb_state {
	struct ssb_state	*shared_state;
	raw_spinlock_t		lock;
	unsigned int		disable_state;
	unsigned long		local_state;
};

#define LSTATE_SSB	0

static DEFINE_PER_CPU(struct ssb_state, ssb_state);

void speculative_store_bypass_ht_init(void)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	st->local_state = 0;

	/*
	 * Shared state setup happens once on the first bringup
	 * of the CPU. It's not destroyed on CPU hotunplug.
	 */
	if (st->shared_state)
		return;

	raw_spin_lock_init(&st->lock);

	/*
	 * Go over HT siblings and check whether one of them has set up the
	 * shared state pointer already.
	 */
	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
		if (cpu == this_cpu)
			continue;

		if (!per_cpu(ssb_state, cpu).shared_state)
			continue;

		/* Link it to the state of the sibling: */
		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
		return;
	}

	/*
	 * First HT sibling to come up on the core. Link shared state of
	 * the first HT sibling to itself. The siblings on the same core
	 * which come up later will see the shared state pointer and link
	 * themselves to the state of this CPU.
	 */
	st->shared_state = st;
}

/*
 * The logic is: the first HT sibling to enable SSBD enables it for both
 * siblings in the core, and the last sibling to disable it disables it for
 * the whole core. This is how MSR_SPEC_CTRL works in "hardware":
 *
 * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
 */
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	u64 msr = x86_amd_ls_cfg_base;

	if (!static_cpu_has(X86_FEATURE_ZEN)) {
		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
		wrmsrl(MSR_AMD64_LS_CFG, msr);
		return;
	}

	if (tifn & _TIF_SSBD) {
		/*
		 * Since this can race with prctl(), block reentry on the
		 * same CPU.
		 */
		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
			return;

		msr |= x86_amd_ls_cfg_ssbd_mask;

		raw_spin_lock(&st->shared_state->lock);
		/* First sibling enables SSBD: */
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		st->shared_state->disable_state++;
		raw_spin_unlock(&st->shared_state->lock);
	} else {
		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
			return;

		raw_spin_lock(&st->shared_state->lock);
		st->shared_state->disable_state--;
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		raw_spin_unlock(&st->shared_state->lock);
	}
}
#else
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

	wrmsrl(MSR_AMD64_LS_CFG, msr);
}
#endif

static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
{
	/*
	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
	 * so ssbd_tif_to_spec_ctrl() just works.
	 */
	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}

/*
 * Update the MSRs managing speculation control, during context switch.
 *
 * tifp: Previous task's thread flags
 * tifn: Next task's thread flags
 */
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
						       unsigned long tifn)
{
	unsigned long tif_diff = tifp ^ tifn;
	u64 msr = x86_spec_ctrl_base;
	bool updmsr = false;

	lockdep_assert_irqs_disabled();

	/* Handle change of TIF_SSBD depending on the mitigation method. */
	if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_ssb_virt_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_core_ssb_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		updmsr |= !!(tif_diff & _TIF_SSBD);
		msr |= ssbd_tif_to_spec_ctrl(tifn);
	}

	/* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
	if (IS_ENABLED(CONFIG_SMP) &&
	    static_branch_unlikely(&switch_to_cond_stibp)) {
		updmsr |= !!(tif_diff & _TIF_SPEC_IB);
		msr |= stibp_tif_to_spec_ctrl(tifn);
	}

	if (updmsr)
		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}

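/*
 * Re-evaluate TIF_SSBD and TIF_SPEC_IB for @tsk if a forced update was
 * requested via TIF_SPEC_FORCE_UPDATE, and return the resulting thread
 * flags.
 */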
static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
{
	if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
		if (task_spec_ssb_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SSBD);
		else
			clear_tsk_thread_flag(tsk, TIF_SSBD);

		if (task_spec_ib_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SPEC_IB);
		else
			clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
	}
	/* Return the updated threadinfo flags */
	return task_thread_info(tsk)->flags;
}

void speculation_ctrl_update(unsigned long tif)
{
	unsigned long flags;

	/* Forced update. Make sure all relevant TIF flags are different */
	local_irq_save(flags);
	__speculation_ctrl_update(~tif, tif);
	local_irq_restore(flags);
}

/* Called from seccomp/prctl update */
void speculation_ctrl_update_current(void)
{
	preempt_disable();
	speculation_ctrl_update(speculation_ctrl_update_tif(current));
	preempt_enable();
}

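/*
 * Toggle the given CR4 bits on the current CPU, keeping the per-CPU
 * shadow copy in cpu_tlbstate in sync. Caller must have interrupts
 * disabled.
 */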
static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
{
	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);

	newval = cr4 ^ mask;
	if (newval != cr4) {
		this_cpu_write(cpu_tlbstate.cr4, newval);
		__write_cr4(newval);
	}
}

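/*
 * Handle the rarely used TIF flags on context switch: I/O bitmap
 * invalidation, user-return notifiers, block-step (BTF), TSC and CPUID
 * faulting toggles, speculation control MSRs and split lock detection.
 */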
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
	unsigned long tifp, tifn;

	tifn = READ_ONCE(task_thread_info(next_p)->flags);
	tifp = READ_ONCE(task_thread_info(prev_p)->flags);

	switch_to_bitmap(tifp);

	propagate_user_return_notify(prev_p, next_p);

	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
	    arch_has_block_step()) {
		unsigned long debugctl, msk;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl &= ~DEBUGCTLMSR_BTF;
		msk = tifn & _TIF_BLOCKSTEP;
		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	if ((tifp ^ tifn) & _TIF_NOTSC)
		cr4_toggle_bits_irqsoff(X86_CR4_TSD);

	if ((tifp ^ tifn) & _TIF_NOCPUID)
		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));

	if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
		__speculation_ctrl_update(tifp, tifn);
	} else {
		speculation_ctrl_update_tif(prev_p);
		tifn = speculation_ctrl_update_tif(next_p);

		/* Enforce MSR update to ensure consistent state */
		__speculation_ctrl_update(~tifn, tifn);
	}

	if ((tifp ^ tifn) & _TIF_SLD)
		switch_to_sld(tifn);
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

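/*
 * Called from the generic idle loop before entering the idle routine:
 * revalidate TSC_ADJUST and touch the NMI watchdog.
 */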
void arch_cpu_idle_enter(void)
{
	tsc_verify_tsc_adjust(false);
	local_touch_nmi();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}

/*
 * We use this if we don't have any better idle routine.
 */
void __cpuidle default_idle(void)
{
	raw_safe_halt();
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif

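/*
 * Park the calling CPU: mark it offline, shut down its local APIC and
 * machine check handling, flush caches on SME-capable parts and halt
 * forever. Typically invoked via IPI when stopping all other CPUs,
 * e.g. for reboot or kexec.
 */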
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

	/*
	 * Use wbinvd on processors that support SME. This provides support
	 * for performing a successful kexec when going from SME inactive
	 * to SME active (or vice-versa). The cache must be cleared so that
	 * if there are entries with the same physical address, both with and
	 * without the encryption bit, they don't race each other when flushed
	 * and potentially end up with the wrong entry being committed to
	 * memory.
	 */
	if (boot_cpu_has(X86_FEATURE_SME))
		native_wbinvd();
	for (;;) {
		/*
		 * Use native_halt() so that memory contents don't change
		 * (stack usage and variables) after possibly issuing the
		 * native_wbinvd() above.
		 */
		native_halt();
	}
}

/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 *
 * XXX this function is completely buggered vs RCU and tracing.
 */
static void amd_e400_idle(void)
{
	/*
	 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
	 * gets set after static_cpu_has() places have been converted via
	 * alternatives.
	 */
	if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		default_idle();
		return;
	}

	tick_broadcast_enter();

	default_idle();

	/*
	 * The switch back from broadcast mode needs to be called with
	 * interrupts disabled.
	 */
	raw_local_irq_disable();
	tick_broadcast_exit();
	raw_local_irq_enable();
}

/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT) || boot_cpu_has_bug(X86_BUG_MONITOR))
		return 0;

	return 1;
}

/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			raw_local_irq_enable();
	} else {
		raw_local_irq_enable();
	}
	__current_clr_polling();
}

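/*
 * Pick the boot-time default idle routine: the AMD E400 aware routine on
 * affected CPUs, MWAIT-C1 where it is preferred, and HALT otherwise.
 * Does nothing if an idle routine was already chosen or "idle=poll" is
 * in effect.
 */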
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) void select_idle_routine(const struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (x86_idle || boot_option_idle_override == IDLE_POLL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) pr_info("using AMD E400 aware idle routine\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) x86_idle = amd_e400_idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) } else if (prefer_mwait_c1_over_halt(c)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) pr_info("using mwait in idle threads\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) x86_idle = mwait_idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) x86_idle = default_idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
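/*
 * On CPUs affected by the AMD C1E erratum the local APIC timer stops
 * while in C1E, so switch this CPU's tick device to broadcast mode.
 */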
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) void amd_e400_c1e_apic_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) tick_broadcast_force();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) void __init arch_post_acpi_subsys_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) u32 lo, hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * AMD E400 detection needs to happen after ACPI has been enabled. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * the machine is affected, the K8_INTP_C1E_ACTIVE_MASK bits are set in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * MSR_K8_INT_PENDING_MSG.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) mark_tsc_unstable("TSC halt in AMD C1E");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) pr_info("System has AMD C1E enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
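/*
 * Parse the "idle=" early kernel parameter: "poll", "halt" or "nomwait".
 */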
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) static int __init idle_setup(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (!str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (!strcmp(str, "poll")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) pr_info("using polling idle threads\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) boot_option_idle_override = IDLE_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) cpu_idle_poll_ctrl(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) } else if (!strcmp(str, "halt")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * When the "idle=halt" boot option is given, HALT is forced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * as the CPU idle routine, so the deeper C2/C3 states will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * not be entered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * The choice is recorded in boot_option_idle_override as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * IDLE_HALT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) x86_idle = default_idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) boot_option_idle_override = IDLE_HALT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) } else if (!strcmp(str, "nomwait")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * When the "idle=nomwait" boot option is given, MWAIT is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * disabled for the CPU C2/C3 states. Only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * boot_option_idle_override is set here; the default idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * routine (x86_idle) is left untouched.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) boot_option_idle_override = IDLE_NOMWAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) early_param("idle", idle_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
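/*
 * Randomize the initial stack pointer by up to 8 KiB (unless address
 * space randomization is disabled) and keep it 16-byte aligned.
 */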
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) unsigned long arch_align_stack(unsigned long sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) sp -= get_random_int() % 8192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return sp & ~0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
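/*
 * Randomize the heap break within a 32 MiB window above mm->brk.
 */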
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) unsigned long arch_randomize_brk(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) return randomize_page(mm->brk, 0x02000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * Called from fs/proc with a reference on @p to find the function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * which called into schedule(). This needs to be done carefully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * because the task might wake up and we might look at a stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * changing under us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) unsigned long get_wchan(struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) unsigned long start, bottom, top, sp, fp, ip, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (p == current || p->state == TASK_RUNNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (!try_get_task_stack(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) start = (unsigned long)task_stack_page(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (!start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * Layout of the stack page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * PADDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * ----------- bottom = start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * The task's stack pointer points at the location where the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * frame pointer is stored. The data on the stack is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * ... IP FP ... IP FP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * We need to read FP and IP, so we need to adjust the upper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * bound by another unsigned long.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) top -= 2 * sizeof(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) bottom = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) sp = READ_ONCE(p->thread.sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (sp < bottom || sp > top)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (fp < bottom || fp > top)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (!in_sched_functions(ip)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) ret = ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) } while (count++ < 16 && p->state != TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) put_task_stack(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
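/*
 * Handle the arch_prctl(2) options common to 32-bit and 64-bit:
 * query or set the per-task CPUID faulting mode.
 */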
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) long do_arch_prctl_common(struct task_struct *task, int option,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) unsigned long cpuid_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) switch (option) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) case ARCH_GET_CPUID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return get_cpuid_mode();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) case ARCH_SET_CPUID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) return set_cpuid_mode(task, cpuid_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }