// SPDX-License-Identifier: GPL-2.0-only
/*
 * Port on Texas Instruments TMS320C6x architecture
 *
 * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
 * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
 */
#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/init_task.h>
#include <linux/tick.h>
#include <linux/mqueue.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>

#include <asm/syscalls.h>

/* hooks for board-specific support */
void (*c6x_restart)(void);
void (*c6x_halt)(void);

extern asmlinkage void ret_from_fork(void);
extern asmlinkage void ret_from_kernel_thread(void);

/*
 * power off function, if any
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
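
/*
 * Illustrative sketch only (the function names below are hypothetical and
 * not taken from this file): board or SoC setup code is expected to
 * install the hooks above during early init, along the lines of
 *
 *	static void my_board_restart(void)
 *	{
 *		// poke the board reset controller here
 *	}
 *
 *	static int __init my_board_setup(void)
 *	{
 *		c6x_restart = my_board_restart;
 *		return 0;
 *	}
 *
 * When no hook is installed, machine_restart(), machine_halt() and
 * machine_power_off() below simply fall through to halt_loop().
 */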

void arch_cpu_idle(void)
{
	unsigned long tmp;

	/*
	 * Put the local_irq_enable (CSR write) and the idle instruction in
	 * the same execute packet so they take effect atomically and there
	 * is no window to race into idle with interrupts already enabled.
	 * Note that mvc only operates on the B side, which is why the asm
	 * below uses the .s2 unit and the "=b" register constraint.
	 */
	asm volatile (" mvc .s2 CSR,%0\n"
		      " or .d2 1,%0,%0\n"
		      " mvc .s2 %0,CSR\n"
		      "|| idle\n"
		      : "=b"(tmp));
}
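
/*
 * For contrast (illustration only, not an alternative used here): enabling
 * interrupts and idling in two separate execute packets, e.g.
 *
 *	local_irq_enable();
 *	asm volatile("idle\n");
 *
 * would leave a window in which an interrupt could arrive and queue work
 * (for instance by setting need_resched), and the core would then go to
 * sleep anyway until the next wakeup event.
 */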

static void halt_loop(void)
{
	printk(KERN_EMERG "System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1)
		asm volatile("idle\n");
}

void machine_restart(char *__unused)
{
	if (c6x_restart)
		c6x_restart();
	halt_loop();
}

void machine_halt(void)
{
	if (c6x_halt)
		c6x_halt();
	halt_loop();
}

void machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
	halt_loop();
}

void flush_thread(void)
{
}

/*
 * Do the necessary setup to start up a newly executed thread.
 */
void start_thread(struct pt_regs *regs, unsigned int pc, unsigned long usp)
{
	/*
	 * The binfmt loader will set up a "full" stack, but the C6X uses an
	 * "empty" stack convention.  Adjust usp so that argc does not get
	 * destroyed if an interrupt is taken before it is read from the
	 * stack.
	 *
	 * NB: Library startup code needs to match this.
	 */
	usp -= 8;

	regs->pc = pc;
	regs->sp = usp;
	regs->tsr |= 0x40;	/* set user mode */
	current->thread.usp = usp;
}
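
/*
 * Sketch of the matching user-space side (an assumption for illustration;
 * no particular C library's startup code is quoted here): because
 * start_thread() left the stack pointer 8 bytes below the argument block
 * set up by the binfmt loader, startup code would read the loader-provided
 * data starting at sp + 8 rather than at sp itself, e.g.
 *
 *	int argc = *(int *)(sp + 8);	// hypothetical read of argc
 *
 * A crt0 that assumed the block began at *sp would look 8 bytes too low.
 */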

/*
 * Copy a new thread context into its stack.
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long ustk_size, struct task_struct *p,
		unsigned long tls)
{
	struct pt_regs *childregs;

	childregs = task_pt_regs(p);

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* case of __kernel_thread: we return to supervisor space */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->sp = (unsigned long)(childregs + 1);
		p->thread.pc = (unsigned long) ret_from_kernel_thread;
		childregs->a0 = usp;		/* function */
		childregs->a1 = ustk_size;	/* argument */
	} else {
		/* Otherwise use the given stack */
		*childregs = *current_pt_regs();
		if (usp)
			childregs->sp = usp;
		p->thread.pc = (unsigned long) ret_from_fork;
	}

	/* Set usp/ksp */
	p->thread.usp = childregs->sp;
	thread_saved_ksp(p) = (unsigned long)childregs - 8;
	p->thread.wchan = p->thread.pc;
#ifdef __DSBT__
	{
		unsigned long dp;

		asm volatile ("mv .S2 b14,%0\n" : "=b"(dp));

		thread_saved_dp(p) = dp;
		if (usp == -1)
			childregs->dp = dp;
	}
#endif
	return 0;
}
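
/*
 * Descriptive note, inferred from this file rather than stated anywhere in
 * it: p->thread.pc is where the new task resumes when it is first switched
 * to.  For a kernel thread that is ret_from_kernel_thread, which invokes
 * the function passed in childregs->a0 with the argument in childregs->a1;
 * for a user fork it is ret_from_fork, which returns to user space through
 * the copied childregs.  The 8-byte gap left below childregs for the saved
 * kernel stack pointer appears to mirror the "empty" stack convention
 * described in start_thread() above.
 */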

unsigned long get_wchan(struct task_struct *p)
{
	return p->thread.wchan;
}