/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h> /* for USER_DS macros */
#include <asm/cacheflush.h>

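/*
 * Dump the general purpose registers and the machine state registers
 * (msr, ear, esr, fsr) from the given trap frame.
 */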
void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_INFO);

	pr_info(" Registers dump: mode=%X\r\n", regs->pt_mode);
	pr_info(" r1=%08lX, r2=%08lX, r3=%08lX, r4=%08lX\n",
				regs->r1, regs->r2, regs->r3, regs->r4);
	pr_info(" r5=%08lX, r6=%08lX, r7=%08lX, r8=%08lX\n",
				regs->r5, regs->r6, regs->r7, regs->r8);
	pr_info(" r9=%08lX, r10=%08lX, r11=%08lX, r12=%08lX\n",
				regs->r9, regs->r10, regs->r11, regs->r12);
	pr_info(" r13=%08lX, r14=%08lX, r15=%08lX, r16=%08lX\n",
				regs->r13, regs->r14, regs->r15, regs->r16);
	pr_info(" r17=%08lX, r18=%08lX, r19=%08lX, r20=%08lX\n",
				regs->r17, regs->r18, regs->r19, regs->r20);
	pr_info(" r21=%08lX, r22=%08lX, r23=%08lX, r24=%08lX\n",
				regs->r21, regs->r22, regs->r23, regs->r24);
	pr_info(" r25=%08lX, r26=%08lX, r27=%08lX, r28=%08lX\n",
				regs->r25, regs->r26, regs->r27, regs->r28);
	pr_info(" r29=%08lX, r30=%08lX, r31=%08lX, rPC=%08lX\n",
				regs->r29, regs->r30, regs->r31, regs->pc);
	pr_info(" msr=%08lX, ear=%08lX, esr=%08lX, fsr=%08lX\n",
				regs->msr, regs->ear, regs->esr, regs->fsr);
}

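/*
 * Platform/board code may install a power-off handler here; by default
 * there is none.
 */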
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

void flush_thread(void)
{
}

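/*
 * Set up the child's kernel stack frame (pt_regs) and saved CPU context.
 * Kernel threads get a zeroed frame with the thread function and its
 * argument stashed in callee-saved registers for ret_from_kernel_thread;
 * user threads start from a copy of the parent's registers.
 */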
int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	struct pt_regs *childregs = task_pt_regs(p);
	struct thread_info *ti = task_thread_info(p);

	if (unlikely(p->flags & PF_KTHREAD)) {
		/*
		 * If we're creating a new kernel thread, just zero all the
		 * registers.  That's fine for a brand new thread.
		 */
		memset(childregs, 0, sizeof(struct pt_regs));
		memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
		ti->cpu_context.r1 = (unsigned long)childregs;
		ti->cpu_context.r20 = (unsigned long)usp; /* fn */
		ti->cpu_context.r19 = (unsigned long)arg; /* fn's argument */
		childregs->pt_mode = 1; /* kernel mode frame */
		local_save_flags(childregs->msr);
#ifdef CONFIG_MMU
		ti->cpu_context.msr = childregs->msr & ~MSR_IE;
#endif
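		/*
		 * r15 is the link register; the -8 bias offsets the +8 that
		 * the rtsd-style return in switch_to() adds, so the new
		 * thread resumes exactly at ret_from_kernel_thread.
		 */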
		ti->cpu_context.r15 = (unsigned long)ret_from_kernel_thread - 8;
		return 0;
	}
	*childregs = *current_pt_regs();
	if (usp)
		childregs->r1 = usp;

	memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
	ti->cpu_context.r1 = (unsigned long)childregs;
#ifndef CONFIG_MMU
	ti->cpu_context.msr = (unsigned long)childregs->msr;
#else
	childregs->msr |= MSR_UMS;

	/*
	 * childregs is a copy of the parent's registers, which were saved
	 * immediately after entering kernel state, before VM was enabled.
	 * This MSR is restored in switch_to() and RETURN(), and we want the
	 * right machine state there: interrupts must be disabled before the
	 * rtbd and enabled after it.  Compose the right MSR for RETURN();
	 * it also works for switch_to(), except for the VM and UMS bits.
	 * Don't touch the UMS, CARRY and cache bits - right now MSR is a
	 * copy of the parent's.
	 */
	childregs->msr &= ~MSR_EIP;
	childregs->msr |= MSR_IE;
	childregs->msr &= ~MSR_VM;
	childregs->msr |= MSR_VMS;
	childregs->msr |= MSR_EE; /* exceptions will be enabled */

	ti->cpu_context.msr = (childregs->msr | MSR_VM);
	ti->cpu_context.msr &= ~MSR_UMS; /* switch_to() runs in kernel mode */
	ti->cpu_context.msr &= ~MSR_IE;
#endif
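	/*
	 * As for kernel threads above: bias r15 by -8 so the rtsd-style
	 * return in switch_to() lands exactly at ret_from_fork.
	 */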
	ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8;

	/*
	 * r21 is the thread register; the TLS pointer arrives as the 6th
	 * argument to clone (r10) and is passed to us here as 'tls'.
	 */
	if (clone_flags & CLONE_SETTLS)
		childregs->r21 = tls;

	return 0;
}

unsigned long get_wchan(struct task_struct *p)
{
	/* TBD (used by procfs) */
	return 0;
}

/* Set up a thread for executing a new program */
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
{
	regs->pc = pc;
	regs->r1 = usp;
	regs->pt_mode = 0; /* user mode frame */
#ifdef CONFIG_MMU
	regs->msr |= MSR_UMS;
	regs->msr &= ~MSR_VM;
#endif
}

#ifdef CONFIG_MMU
#include <linux/elfcore.h>
/*
 * Dump FPU state for a core file.  MicroBlaze has no separate FPU
 * register file, so there is nothing to copy out.
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
	return 0; /* MicroBlaze has no separate FPU registers */
}
#endif /* CONFIG_MMU */

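/*
 * Default idle: the generic idle loop calls this with interrupts disabled;
 * nothing architecture-specific to do here beyond re-enabling them.
 */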
void arch_cpu_idle(void)
{
	raw_local_irq_enable();
}