^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /* arch/sparc64/kernel/process.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * This file handles the architecture-dependent parts of process handling..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <stdarg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/sched/debug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/sched/task.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/sched/task_stack.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/stddef.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/user.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/compat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/tick.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/perf_event.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/elfcore.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <linux/sysrq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <linux/nmi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <linux/context_tracking.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <linux/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include <asm/pgalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include <asm/pstate.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include <asm/elf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #include <asm/fpumacro.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include <asm/head.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include <asm/cpudata.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #include <asm/mmu_context.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #include <asm/unistd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #include <asm/hypervisor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #include <asm/syscalls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #include <asm/irq_regs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #include <asm/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #include <asm/pcr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #include "kstack.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) /* Idle loop support on sparc64. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) void arch_cpu_idle(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) if (tlb_type != hypervisor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) touch_nmi_watchdog();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) raw_local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) unsigned long pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) raw_local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) /* The sun4v sleeping code requires that we have PSTATE.IE cleared over
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * the cpu sleep hypervisor call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) __asm__ __volatile__(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) "rdpr %%pstate, %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) "andn %0, %1, %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) "wrpr %0, %%g0, %%pstate"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) : "=&r" (pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) : "i" (PSTATE_IE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) if (!need_resched() && !cpu_is_offline(smp_processor_id())) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) sun4v_cpu_yield();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) /* If resumed by cpu_poke then we need to explicitly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * call scheduler_ipi().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) scheduler_poke();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) /* Re-enable interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) __asm__ __volatile__(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) "rdpr %%pstate, %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) "or %0, %1, %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) "wrpr %0, %%g0, %%pstate"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) : "=&r" (pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) : "i" (PSTATE_IE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) void arch_cpu_idle_dead(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) sched_preempt_enable_no_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) cpu_play_dead();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) static void show_regwindow32(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) struct reg_window32 __user *rw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) struct reg_window32 r_w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) mm_segment_t old_fs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) __asm__ __volatile__ ("flushw");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) rw = compat_ptr((unsigned int)regs->u_regs[14]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) old_fs = get_fs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) set_fs (USER_DS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) if (copy_from_user (&r_w, rw, sizeof(r_w))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) set_fs (old_fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) set_fs (old_fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) printk("l0: %08x l1: %08x l2: %08x l3: %08x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) "l4: %08x l5: %08x l6: %08x l7: %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) printk("i0: %08x i1: %08x i2: %08x i3: %08x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) "i4: %08x i5: %08x i6: %08x i7: %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) #define show_regwindow32(regs) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) static void show_regwindow(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) struct reg_window __user *rw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) struct reg_window *rwk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) struct reg_window r_w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) mm_segment_t old_fs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) __asm__ __volatile__ ("flushw");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) rw = (struct reg_window __user *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) (regs->u_regs[14] + STACK_BIAS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) rwk = (struct reg_window *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) (regs->u_regs[14] + STACK_BIAS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) if (!(regs->tstate & TSTATE_PRIV)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) old_fs = get_fs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) set_fs (USER_DS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) if (copy_from_user (&r_w, rw, sizeof(r_w))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) set_fs (old_fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) rwk = &r_w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) set_fs (old_fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) show_regwindow32(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) if (regs->tstate & TSTATE_PRIV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) printk("I7: <%pS>\n", (void *) rwk->ins[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) void show_regs(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) show_regs_print_info(KERN_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) regs->tpc, regs->tnpc, regs->y, print_tainted());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) printk("TPC: <%pS>\n", (void *) regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) regs->u_regs[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) regs->u_regs[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) regs->u_regs[11]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) regs->u_regs[15]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) show_regwindow(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) show_stack(current, (unsigned long *)regs->u_regs[UREG_FP], KERN_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) static DEFINE_SPINLOCK(global_cpu_snapshot_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) int this_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) struct global_reg_snapshot *rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) flushw_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) rp = &global_cpu_snapshot[this_cpu].reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) rp->tstate = regs->tstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) rp->tpc = regs->tpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) rp->tnpc = regs->tnpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) rp->o7 = regs->u_regs[UREG_I7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) if (regs->tstate & TSTATE_PRIV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) struct reg_window *rw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) rw = (struct reg_window *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) (regs->u_regs[UREG_FP] + STACK_BIAS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) if (kstack_valid(tp, (unsigned long) rw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) rp->i7 = rw->ins[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) rw = (struct reg_window *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) (rw->ins[6] + STACK_BIAS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) if (kstack_valid(tp, (unsigned long) rw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) rp->rpc = rw->ins[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) rp->i7 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) rp->rpc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) rp->thread = tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) /* In order to avoid hangs we do not try to synchronize with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) * global register dump client cpus. The last store they make is to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) * the thread pointer, so do a short poll waiting for that to become
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) * non-NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) static void __global_reg_poll(struct global_reg_snapshot *gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) int limit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) while (!gp->thread && ++limit < 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) struct thread_info *tp = current_thread_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) struct pt_regs *regs = get_irq_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) int this_cpu, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) if (!regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) regs = tp->kregs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) spin_lock_irqsave(&global_cpu_snapshot_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) this_cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) __global_reg_self(tp, regs, this_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) smp_fetch_global_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) for_each_cpu(cpu, mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) struct global_reg_snapshot *gp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) if (exclude_self && cpu == this_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) gp = &global_cpu_snapshot[cpu].reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) __global_reg_poll(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) tp = gp->thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) (cpu == this_cpu ? '*' : ' '), cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) gp->tstate, gp->tpc, gp->tnpc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) ((tp && tp->task) ? tp->task->comm : "NULL"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) ((tp && tp->task) ? tp->task->pid : -1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) if (gp->tstate & TSTATE_PRIV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) (void *) gp->tpc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) (void *) gp->o7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) (void *) gp->i7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) (void *) gp->rpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) gp->tpc, gp->o7, gp->i7, gp->rpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) touch_nmi_watchdog();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) #ifdef CONFIG_MAGIC_SYSRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) static void sysrq_handle_globreg(int key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) trigger_all_cpu_backtrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) static const struct sysrq_key_op sparc_globalreg_op = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) .handler = sysrq_handle_globreg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) .help_msg = "global-regs(y)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) .action_msg = "Show Global CPU Regs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) static void __global_pmu_self(int this_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) struct global_pmu_snapshot *pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) int i, num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) if (!pcr_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) pp = &global_cpu_snapshot[this_cpu].pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) num = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) if (tlb_type == hypervisor &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) num = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) for (i = 0; i < num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) pp->pcr[i] = pcr_ops->read_pcr(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) pp->pic[i] = pcr_ops->read_pic(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) static void __global_pmu_poll(struct global_pmu_snapshot *pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) int limit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) while (!pp->pcr[0] && ++limit < 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) static void pmu_snapshot_all_cpus(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) int this_cpu, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) spin_lock_irqsave(&global_cpu_snapshot_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) this_cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) __global_pmu_self(this_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) smp_fetch_global_pmu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) __global_pmu_poll(pp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) printk("%c CPU[%3d]: PCR[%08lx:%08lx:%08lx:%08lx] PIC[%08lx:%08lx:%08lx:%08lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) (cpu == this_cpu ? '*' : ' '), cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) touch_nmi_watchdog();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) static void sysrq_handle_globpmu(int key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) pmu_snapshot_all_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) static const struct sysrq_key_op sparc_globalpmu_op = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) .handler = sysrq_handle_globpmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) .help_msg = "global-pmu(x)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) .action_msg = "Show Global PMU Regs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) static int __init sparc_sysrq_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) int ret = register_sysrq_key('y', &sparc_globalreg_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) ret = register_sysrq_key('x', &sparc_globalpmu_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) core_initcall(sparc_sysrq_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) /* Free current thread data structures etc.. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) void exit_thread(struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) struct thread_info *t = task_thread_info(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) if (t->utraps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) if (t->utraps[0] < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) kfree (t->utraps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) t->utraps[0]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) void flush_thread(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) struct thread_info *t = current_thread_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) struct mm_struct *mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) mm = t->task->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) if (mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) tsb_context_switch(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) set_thread_wsaved(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) /* Clear FPU register state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) t->fpsaved[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) /* It's a bit more tricky when 64-bit tasks are involved... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) bool stack_64bit = test_thread_64bit_stack(psp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) unsigned long fp, distance, rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) if (stack_64bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) csp += STACK_BIAS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) psp += STACK_BIAS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) __get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) fp += STACK_BIAS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) if (test_thread_flag(TIF_32BIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) fp &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) /* Now align the stack as this is mandatory in the Sparc ABI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) * due to how register windows work. This hides the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) * restriction from thread libraries etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) csp &= ~15UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) distance = fp - psp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) rval = (csp - distance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) rval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) else if (!stack_64bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) if (put_user(((u32)csp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) &(((struct reg_window32 __user *)rval)->ins[6])))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) rval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) if (put_user(((u64)csp - STACK_BIAS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) &(((struct reg_window __user *)rval)->ins[6])))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) rval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) rval = rval - STACK_BIAS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) /* Standard stuff. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) static inline void shift_window_buffer(int first_win, int last_win,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) struct thread_info *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) for (i = first_win; i < last_win; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) memcpy(&t->reg_window[i], &t->reg_window[i+1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) sizeof(struct reg_window));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
/* Flush register windows the CPU is still caching into the per-thread
 * kernel buffer, then try to write each buffered window out to the
 * user stack.  Windows that copy out cleanly are removed from the
 * buffer; ones that fault are kept for a later retry.
 */
void synchronize_user_stack(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	if ((window = get_thread_wsaved()) != 0) {
		window -= 1;
		/* Walk the buffer from the newest saved window down to 0. */
		do {
			struct reg_window *rwin = &t->reg_window[window];
			int winsize = sizeof(struct reg_window);
			unsigned long sp;

			sp = t->rwbuf_stkptrs[window];

			/* 64-bit frames live at sp + STACK_BIAS; 32-bit
			 * frames are smaller and unbiased.
			 */
			if (test_thread_64bit_stack(sp))
				sp += STACK_BIAS;
			else
				winsize = sizeof(struct reg_window32);

			if (!copy_to_user((char __user *)sp, rwin, winsize)) {
				/* Wrote it out: compact the buffer over this
				 * slot and shrink the saved-window count.
				 */
				shift_window_buffer(window, get_thread_wsaved() - 1, t);
				set_thread_wsaved(get_thread_wsaved() - 1);
			}
		} while (window--);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)
/* Deliver SIGBUS (BUS_ADRALN) for a misaligned user stack pointer
 * found while writing register windows back to userspace.
 */
static void stack_unaligned(unsigned long sp)
{
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) sp, 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
/* Ratelimited diagnostics printed when a saved register window cannot
 * be written back to the user stack (32-bit and 64-bit task formats).
 */
static const char uwfault32[] = KERN_INFO \
	"%s[%d]: bad register window fault: SP %08lx (orig_sp %08lx) TPC %08lx O7 %08lx\n";
static const char uwfault64[] = KERN_INFO \
	"%s[%d]: bad register window fault: SP %016lx (orig_sp %016lx) TPC %08lx O7 %016lx\n";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)
/* Write every buffered register window back to the user stack before
 * returning to userspace.  On success the buffer is emptied; if a copy
 * faults, the still-unwritten windows are kept (wsaved restored) and
 * the task is killed with SIGSEGV.
 */
void fault_in_user_windows(struct pt_regs *regs)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	window = get_thread_wsaved();

	if (likely(window != 0)) {
		window -= 1;
		/* Newest window first, counting down to 0. */
		do {
			struct reg_window *rwin = &t->reg_window[window];
			int winsize = sizeof(struct reg_window);
			unsigned long sp, orig_sp;

			orig_sp = sp = t->rwbuf_stkptrs[window];

			/* 64-bit frames are accessed at sp + STACK_BIAS;
			 * 32-bit frames are smaller and unbiased.
			 */
			if (test_thread_64bit_stack(sp))
				sp += STACK_BIAS;
			else
				winsize = sizeof(struct reg_window32);

			/* Queue SIGBUS for a misaligned sp; the copy below
			 * still runs and will typically fault as well.
			 */
			if (unlikely(sp & 0x7UL))
				stack_unaligned(sp);

			if (unlikely(copy_to_user((char __user *)sp,
						  rwin, winsize))) {
				if (show_unhandled_signals)
					printk_ratelimited(is_compat_task() ?
							   uwfault32 : uwfault64,
							   current->comm, current->pid,
							   sp, orig_sp,
							   regs->tpc,
							   regs->u_regs[UREG_I7]);
				goto barf;
			}
		} while (window--);
	}
	set_thread_wsaved(0);
	return;

barf:
	/* Keep the windows we failed to write (indices 0..window). */
	set_thread_wsaved(window + 1);
	force_sig(SIGSEGV);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) /* Copy a Sparc thread. The fork() return value conventions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) * under SunOS are nothing short of bletcherous:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) * Parent --> %o0 == childs pid, %o1 == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) * Child --> %o0 == parents pid, %o1 == 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) */
int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	struct thread_info *t = task_thread_info(p);
	struct pt_regs *regs = current_pt_regs();
	struct sparc_stackf *parent_sf;
	unsigned long child_stack_sz;
	char *child_trap_frame;

	/* Calculate offset to stack_frame & pt_regs */
	child_stack_sz = (STACKFRAME_SZ + TRACEREG_SZ);
	child_trap_frame = (task_stack_page(p) +
			    (THREAD_SIZE - child_stack_sz));

	/* The child's kernel stack pointer is biased, per the 64-bit ABI. */
	t->new_child = 1;
	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
	t->kregs = (struct pt_regs *) (child_trap_frame +
				       sizeof(struct sparc_stackf));
	t->fpsaved[0] = 0;

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* Kernel thread: zeroed trap frame; entry point and its
		 * argument are passed in %g1/%g2 for the ret_from_fork path.
		 */
		memset(child_trap_frame, 0, child_stack_sz);
		__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
			(current_pt_regs()->tstate + 1) & TSTATE_CWP;
		t->current_ds = ASI_P;
		t->kregs->u_regs[UREG_G1] = sp; /* function */
		t->kregs->u_regs[UREG_G2] = arg;
		return 0;
	}

	/* User thread: clone the parent's trap frame and stack frame. */
	parent_sf = ((struct sparc_stackf *) regs) - 1;
	memcpy(child_trap_frame, parent_sf, child_stack_sz);
	if (t->flags & _TIF_32BIT) {
		/* 32-bit tasks only use the low 32 bits of sp/fp. */
		sp &= 0x00000000ffffffffUL;
		regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
	}
	t->kregs->u_regs[UREG_FP] = sp;
	__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
		(regs->tstate + 1) & TSTATE_CWP;
	t->current_ds = ASI_AIUS;
	if (sp != regs->u_regs[UREG_FP]) {
		/* Caller supplied a new stack (clone with a stack argument):
		 * replicate the parent's top frame onto it.
		 */
		unsigned long csp;

		csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
		if (!csp)
			return -EFAULT;
		t->kregs->u_regs[UREG_FP] = csp;
	}
	if (t->utraps)
		t->utraps[0]++;	/* bump refcount on the shared utrap table */

	/* Set the return value for the child. */
	t->kregs->u_regs[UREG_I0] = current->pid;
	t->kregs->u_regs[UREG_I1] = 1;

	/* Set the second return value for the parent. */
	regs->u_regs[UREG_I1] = 0;

	if (clone_flags & CLONE_SETTLS)
		t->kregs->u_regs[UREG_G7] = tls;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
/* TIF_MCDPER in thread info flags for current task is updated lazily upon
 * a context switch. Update this flag in current task's thread flags
 * before dup so the dup'd task will inherit the current TIF_MCDPER flag.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (adi_capable()) {
		register unsigned long tmp_mcdper;

		/* %mcdper has no assembler mnemonic on older toolchains,
		 * so the read is emitted as a raw opcode.
		 */
		__asm__ __volatile__(
			".word 0x83438000\n\t"	/* rd %mcdper, %g1 */
			"mov %%g1, %0\n\t"
			: "=r" (tmp_mcdper)
			:
			: "g1");
		if (tmp_mcdper)
			set_thread_flag(TIF_MCDPER);
		else
			clear_thread_flag(TIF_MCDPER);
	}

	/* Structure copy; the updated flags are duplicated with it. */
	*dst = *src;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) unsigned long get_wchan(struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) unsigned long pc, fp, bias = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) struct thread_info *tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) struct reg_window *rw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) unsigned long ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) if (!task || task == current ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) task->state == TASK_RUNNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) tp = task_thread_info(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) bias = STACK_BIAS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) fp = task_thread_info(task)->ksp + bias;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) if (!kstack_valid(tp, fp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) rw = (struct reg_window *) fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) pc = rw->ins[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) if (!in_sched_functions(pc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) ret = pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) fp = rw->ins[6] + bias;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) } while (++count < 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) }