// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <linux/time-internal.h>

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

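/*
 * Walk the per-cpu task table and return the index of the CPU whose
 * tracked host pid matches, or -1 if none does.
 */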
int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

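/*
 * Kernel stacks are plain page blocks: "order" is the log2 of the page
 * count, and "atomic" selects GFP_ATOMIC for callers that cannot sleep.
 */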
unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

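/*
 * The actual context switch: remember where we came from, publish the
 * new task in cpu_tasks[], and jump into the destination's saved
 * jmp_buf via switch_threads(). When this task is later switched back
 * to, execution resumes here and we return the task we came from.
 */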
void *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
	arch_switch_to(current);

	return current->thread.prev_sched;
}

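/*
 * Deferred work on the way back out to userspace: reschedule if asked,
 * then handle pending signals and resume notifications.
 */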
void interrupt_end(void)
{
	struct pt_regs *regs = &current->thread.regs;

	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING))
		do_signal(regs);
	if (test_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(regs);
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	n = fn(arg);
	userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We might want to rely on that to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}

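/*
 * Set up a newly copied task. A userspace child gets a copy of the
 * parent's registers, with its syscall return value forced to 0, and
 * restarts in fork_handler(); for a kernel thread, "sp" actually
 * carries the thread function and "arg" its argument, and the thread
 * starts in new_thread_handler() with a safe register set.
 */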
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	void (*handler)(void);
	int kthread = current->flags & PF_KTHREAD;
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (!kthread) {
		memcpy(&p->thread.regs.regs, current_pt_regs(),
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.u.thread.proc = (int (*)(void *))sp;
		p->thread.request.u.thread.arg = (void *)arg;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (!kthread) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_set_tls(p, tls);
	}

	return ret;
}

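/*
 * Run a callback on the initial thread's stack. kmalloc_ok is cleared
 * around the call so that, presumably, code reached this way does not
 * try to kmalloc() from a context where that is unsafe.
 */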
void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

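/*
 * Idle by sleeping one second at a time; under time-travel mode the
 * sleep goes through the simulated clock instead of the host timer.
 */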
static void um_idle_sleep(void)
{
	unsigned long long duration = UM_NSEC_PER_SEC;

	if (time_travel_mode != TT_MODE_OFF) {
		time_travel_sleep(duration);
	} else {
		os_idle_sleep(duration);
	}
}

void arch_cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	um_idle_sleep();
	raw_local_irq_enable();
}

int __cant_sleep(void)
{
	/* Is in_interrupt() really needed? */
	return in_atomic() || irqs_disabled() || in_interrupt();
}

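/*
 * Nonzero if "sp" is outside this task's kernel stack: mask sp down to
 * the kernel-stack base and compare with current_thread_info(), which
 * sits at the bottom of the stack.
 */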
int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

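/*
 * SYSEMU support: the level (0-2, capped at what the host's ptrace was
 * detected to support) controls how traced syscalls are emulated, and
 * is exposed read/write through /proc/sysemu, e.g. "echo 1 > /proc/sysemu".
 */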
static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct proc_ops sysemu_proc_ops = {
	.proc_open	= sysemu_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_ops);

	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);

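/*
 * Single-stepping state of a task (current if t is NULL): 0 if not
 * stepping (PT_DTRACE clear), 1 if stepping with a single-stepped
 * syscall pending, 2 if just stepping.
 */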
int singlestepping(void *t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h.
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif

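/*
 * Guess where a sleeping task is blocked: scan its kernel stack for
 * the first kernel-text address found above (i.e. called from) the
 * scheduler functions and report that as the wait channel.
 */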
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = false;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = true;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

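/*
 * For core dumps: fetch the FPU state from the host process backing
 * this CPU's userspace via save_i387_registers().
 */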
int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
}