^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Vineetg: Aug 2009
 * -"C" version of lowest level context switch asm macro called by scheduler
 *  gcc doesn't generate the dwarf CFI info for hand written asm, hence can't
 *  backtrace out of it (e.g. tasks sleeping in kernel).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * So we cheat a bit by writing almost similar code in inline-asm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * -This is a hacky way of doing things, but there is no other simple way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * I don't want/intend to extend unwinding code to understand raw asm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <asm/asm-offsets.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/sched/debug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
/*
 * thread.ksp offset in *words*, not bytes: the scaled-index forms
 * st.as/ld.as used below multiply their S9 immediate by the access size
 * (4), so the byte offset is pre-divided here.
 */
#define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
/*
 * __switch_to - lowest-level context switch, written in inline asm
 * @prev_task: task being switched out
 * @next_task: task being switched in
 *
 * Saves @prev_task's callee-saved registers (r13-r25) on its kernel stack,
 * records the resulting SP in prev_task->thread.ksp, points _current_task
 * (and r25, if configured as the current-task register) at @next_task,
 * then loads @next_task's saved SP and pops its callee-saved registers.
 * FP and BLINK are saved/restored by the compiler-generated prologue and
 * epilogue of this very function, which is why this works as plain "C".
 *
 * Returns @prev_task, per the generic __switch_to() contract.
 */
struct task_struct *__sched
__switch_to(struct task_struct *prev_task, struct task_struct *next_task)
{
	unsigned int tmp;
	unsigned int prev = (unsigned int)prev_task;
	unsigned int next = (unsigned int)next_task;

	__asm__ __volatile__(
		/* FP/BLINK save generated by gcc (standard function prologue) */
		/* st.a pre-decrements SP, i.e. a push of each callee reg */
		"st.a r13, [sp, -4] \n\t"
		"st.a r14, [sp, -4] \n\t"
		"st.a r15, [sp, -4] \n\t"
		"st.a r16, [sp, -4] \n\t"
		"st.a r17, [sp, -4] \n\t"
		"st.a r18, [sp, -4] \n\t"
		"st.a r19, [sp, -4] \n\t"
		"st.a r20, [sp, -4] \n\t"
		"st.a r21, [sp, -4] \n\t"
		"st.a r22, [sp, -4] \n\t"
		"st.a r23, [sp, -4] \n\t"
		"st.a r24, [sp, -4] \n\t"
#ifndef CONFIG_ARC_CURR_IN_REG
		"st.a r25, [sp, -4] \n\t"
#else
		/*
		 * r25 holds "current" and is reloaded below, not preserved;
		 * still reserve its slot so the frame layout stays fixed.
		 */
		"sub sp, sp, 4 \n\t" /* usual r25 placeholder */
#endif

		/* set ksp of outgoing task in tsk->thread.ksp */
#if KSP_WORD_OFF <= 255
		"st.as sp, [%3, %1] \n\t"
#else
		/*
		 * Workaround for NR_CPUS=4k
		 * %1 is bigger than 255 (S9 offset for st.as)
		 * add2 computes %3 + %1*4; r24 is free, it was saved above.
		 */
		"add2 r24, %3, %1 \n\t"
		"st sp, [r24] \n\t"
#endif

		/*
		 * setup _current_task with incoming tsk.
		 * optionally, set r25 to that as well
		 * For SMP extra work to get to &_current_task[cpu]
		 * (open coded SET_CURR_TASK_ON_CPU)
		 */
#ifndef CONFIG_SMP
		"st %2, [@_current_task] \n\t"
#else
		/* cpu-id lives in bits [15:8] of the IDENTITY aux reg */
		"lr r24, [identity] \n\t"
		"lsr r24, r24, 8 \n\t"
		"bmsk r24, r24, 7 \n\t"
		"add2 r24, @_current_task, r24 \n\t"
		"st %2, [r24] \n\t"
#endif
#ifdef CONFIG_ARC_CURR_IN_REG
		"mov r25, %2 \n\t"
#endif

		/* get ksp of incoming task from tsk->thread.ksp */
		"ld.as sp, [%2, %1] \n\t"

		/* start loading its CALLEE reg file */

#ifndef CONFIG_ARC_CURR_IN_REG
		"ld.ab r25, [sp, 4] \n\t"
#else
		/* r25 already set to "current" above; just skip the slot */
		"add sp, sp, 4 \n\t"
#endif
		/* ld.ab post-increments SP, i.e. a pop of each callee reg */
		"ld.ab r24, [sp, 4] \n\t"
		"ld.ab r23, [sp, 4] \n\t"
		"ld.ab r22, [sp, 4] \n\t"
		"ld.ab r21, [sp, 4] \n\t"
		"ld.ab r20, [sp, 4] \n\t"
		"ld.ab r19, [sp, 4] \n\t"
		"ld.ab r18, [sp, 4] \n\t"
		"ld.ab r17, [sp, 4] \n\t"
		"ld.ab r16, [sp, 4] \n\t"
		"ld.ab r15, [sp, 4] \n\t"
		"ld.ab r14, [sp, 4] \n\t"
		"ld.ab r13, [sp, 4] \n\t"

		/* last (ret value) = prev : although for ARC it mov r0, r0 */
		"mov %0, %3 \n\t"

		/* FP/BLINK restore generated by gcc (standard func epilogue) */

		: "=r"(tmp)
		: "n"(KSP_WORD_OFF), "r"(next), "r"(prev)
		: "blink"
	);

	return (struct task_struct *)tmp;
}