^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #ifndef _KSTACK_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #define _KSTACK_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/thread_info.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <asm/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) /* SP must be STACK_BIAS adjusted already. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) unsigned long base = (unsigned long) tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) /* Stack pointer must be 16-byte aligned. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) if (sp & (16UL - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) if (sp >= (base + sizeof(struct thread_info)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) if (hardirq_stack[tp->cpu]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) base = (unsigned long) hardirq_stack[tp->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) if (sp >= base &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) base = (unsigned long) softirq_stack[tp->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) if (sp >= base &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) /* Does "regs" point to a valid pt_regs trap frame? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) static inline bool kstack_is_trap_frame(struct thread_info *tp, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) unsigned long base = (unsigned long) tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) unsigned long addr = (unsigned long) regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) if (addr >= base &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) addr <= (base + THREAD_SIZE - sizeof(*regs)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) goto check_magic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) if (hardirq_stack[tp->cpu]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) base = (unsigned long) hardirq_stack[tp->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) if (addr >= base &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) addr <= (base + THREAD_SIZE - sizeof(*regs)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) goto check_magic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) base = (unsigned long) softirq_stack[tp->cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) if (addr >= base &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) addr <= (base + THREAD_SIZE - sizeof(*regs)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) goto check_magic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) check_magic:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
/* Switch this CPU onto its per-cpu hardirq stack and return the previous
 * %sp so the caller can hand it to restore_hardirq_stack() later.
 *
 * If %sp already lies within the hardirq stack (a nested invocation),
 * the stack pointer is left untouched; the current %sp is returned
 * either way.  Must be always_inline: a real call would push a frame on
 * the very stack being swapped out from under us.
 */
static inline __attribute__((always_inline)) void *set_hardirq_stack(void)
{
	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];

	/* Read the live stack pointer before deciding whether to move it. */
	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
	if (orig_sp < sp ||
	    orig_sp > (sp + THREAD_SIZE)) {
		/* Point %sp at the top of the hardirq stack, leaving room
		 * for one frame (192 bytes — presumably the minimal sparc64
		 * register save area; TODO confirm) and applying STACK_BIAS
		 * as required by the 64-bit ABI.
		 */
		sp += THREAD_SIZE - 192 - STACK_BIAS;
		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
	}

	return orig_sp;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
/* Undo set_hardirq_stack(): reinstall the stack pointer it returned.
 * Must be always_inline for the same reason — we are replacing the
 * stack a real call frame would live on.
 */
static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
{
	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #endif /* _KSTACK_H */