/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
 * Copyright (C) 2001,2002 NEC Corporation
 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 * Heavily modified by John Williams for Microblaze
 */

#include <linux/sys.h>
#include <linux/linkage.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/exceptions.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#include <asm/page.h>
#include <asm/unistd.h>

#include <linux/errno.h>
#include <asm/signal.h>

#undef DEBUG

#ifdef DEBUG
/* Create space for syscalls counting. */
.section .data
.global syscall_debug_table
.align 4
syscall_debug_table:
.space (__NR_syscalls * 4)
#endif /* DEBUG */

#define C_ENTRY(name) .globl name; .align 4; name

/*
 * Various ways of setting and clearing BIP in the flags (MSR) reg.
 * This is mucky, but necessary on MicroBlaze versions that
 * allow MSR instructions to write to BIP.
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
.macro clear_bip
msrclr r0, MSR_BIP
.endm

.macro set_bip
msrset r0, MSR_BIP
.endm

.macro clear_eip
msrclr r0, MSR_EIP
.endm

.macro set_ee
msrset r0, MSR_EE
.endm

.macro disable_irq
msrclr r0, MSR_IE
.endm

.macro enable_irq
msrset r0, MSR_IE
.endm

.macro set_ums
msrset r0, MSR_UMS
msrclr r0, MSR_VMS
.endm

.macro set_vms
msrclr r0, MSR_UMS
msrset r0, MSR_VMS
.endm

.macro clear_ums
msrclr r0, MSR_UMS
.endm

.macro clear_vms_ums
msrclr r0, MSR_VMS | MSR_UMS
.endm
#else
.macro clear_bip
mfs r11, rmsr
andi r11, r11, ~MSR_BIP
mts rmsr, r11
.endm

.macro set_bip
mfs r11, rmsr
ori r11, r11, MSR_BIP
mts rmsr, r11
.endm

.macro clear_eip
mfs r11, rmsr
andi r11, r11, ~MSR_EIP
mts rmsr, r11
.endm

.macro set_ee
mfs r11, rmsr
ori r11, r11, MSR_EE
mts rmsr, r11
.endm

.macro disable_irq
mfs r11, rmsr
andi r11, r11, ~MSR_IE
mts rmsr, r11
.endm

.macro enable_irq
mfs r11, rmsr
ori r11, r11, MSR_IE
mts rmsr, r11
.endm

.macro set_ums
mfs r11, rmsr
ori r11, r11, MSR_VMS
andni r11, r11, MSR_UMS
mts rmsr, r11
.endm

.macro set_vms
mfs r11, rmsr
ori r11, r11, MSR_VMS
andni r11, r11, MSR_UMS
mts rmsr, r11
.endm

.macro clear_ums
mfs r11, rmsr
andni r11, r11, MSR_UMS
mts rmsr, r11
.endm

.macro clear_vms_ums
mfs r11, rmsr
andni r11, r11, (MSR_VMS|MSR_UMS)
mts rmsr, r11
.endm
#endif

/* Define how to call high-level functions. With the MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers r11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode save */
#define VM_ON \
set_ums; \
rted r0, 2f; \
nop; \
2:

/* turn off virtual protected mode save and user mode save */
#define VM_OFF \
clear_vms_ums; \
rted r0, TOPHYS(1f); \
nop; \
1:
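
/* Note (descriptive, based on how these macros are used below): the UMS/VMS
 * "save" bits written here only become the live UM/VM mode bits when one of
 * the delay-slot return instructions (rted/rtbd/rtid) executes, so the mode
 * switch takes effect at the local label the macro returns to (2f / 1f).
 */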

#define SAVE_REGS \
swi r2, r1, PT_R2; /* Save SDA */ \
swi r3, r1, PT_R3; \
swi r4, r1, PT_R4; \
swi r5, r1, PT_R5; \
swi r6, r1, PT_R6; \
swi r7, r1, PT_R7; \
swi r8, r1, PT_R8; \
swi r9, r1, PT_R9; \
swi r10, r1, PT_R10; \
swi r11, r1, PT_R11; /* save clobbered regs after rval */\
swi r12, r1, PT_R12; \
swi r13, r1, PT_R13; /* Save SDA2 */ \
swi r14, r1, PT_PC; /* PC, before IRQ/trap */ \
swi r15, r1, PT_R15; /* Save LP */ \
swi r16, r1, PT_R16; \
swi r17, r1, PT_R17; \
swi r18, r1, PT_R18; /* Save asm scratch reg */ \
swi r19, r1, PT_R19; \
swi r20, r1, PT_R20; \
swi r21, r1, PT_R21; \
swi r22, r1, PT_R22; \
swi r23, r1, PT_R23; \
swi r24, r1, PT_R24; \
swi r25, r1, PT_R25; \
swi r26, r1, PT_R26; \
swi r27, r1, PT_R27; \
swi r28, r1, PT_R28; \
swi r29, r1, PT_R29; \
swi r30, r1, PT_R30; \
swi r31, r1, PT_R31; /* Save current task reg */ \
mfs r11, rmsr; /* save MSR */ \
swi r11, r1, PT_MSR;
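
/* SAVE_REGS expects r1 to already point at a freshly reserved pt_regs frame
 * (callers either do "addik r1, r1, -PT_SIZE" first or fold the -PT_SIZE into
 * the stack-switch arithmetic). It deliberately does not store r0, r1,
 * PT_MODE or the syscall number; each entry path stores those itself.
 */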

#define RESTORE_REGS_GP \
lwi r2, r1, PT_R2; /* restore SDA */ \
lwi r3, r1, PT_R3; \
lwi r4, r1, PT_R4; \
lwi r5, r1, PT_R5; \
lwi r6, r1, PT_R6; \
lwi r7, r1, PT_R7; \
lwi r8, r1, PT_R8; \
lwi r9, r1, PT_R9; \
lwi r10, r1, PT_R10; \
lwi r11, r1, PT_R11; /* restore clobbered regs after rval */\
lwi r12, r1, PT_R12; \
lwi r13, r1, PT_R13; /* restore SDA2 */ \
lwi r14, r1, PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
lwi r15, r1, PT_R15; /* restore LP */ \
lwi r16, r1, PT_R16; \
lwi r17, r1, PT_R17; \
lwi r18, r1, PT_R18; /* restore asm scratch reg */ \
lwi r19, r1, PT_R19; \
lwi r20, r1, PT_R20; \
lwi r21, r1, PT_R21; \
lwi r22, r1, PT_R22; \
lwi r23, r1, PT_R23; \
lwi r24, r1, PT_R24; \
lwi r25, r1, PT_R25; \
lwi r26, r1, PT_R26; \
lwi r27, r1, PT_R27; \
lwi r28, r1, PT_R28; \
lwi r29, r1, PT_R29; \
lwi r30, r1, PT_R30; \
lwi r31, r1, PT_R31; /* Restore cur task reg */

#define RESTORE_REGS \
lwi r11, r1, PT_MSR; \
mts rmsr, r11; \
RESTORE_REGS_GP

#define RESTORE_REGS_RTBD \
lwi r11, r1, PT_MSR; \
andni r11, r11, MSR_EIP; /* clear EIP */ \
ori r11, r11, MSR_EE | MSR_BIP; /* set EE and BIP */ \
mts rmsr, r11; \
RESTORE_REGS_GP
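
/* RESTORE_REGS reloads the MSR image exactly as saved; RESTORE_REGS_RTBD
 * additionally clears EIP and sets EE | BIP in that image, so exceptions are
 * re-enabled while breaks/interrupts stay masked until the final rtbd/rtid
 * return instruction drops back to the interrupted context.
 */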

#define SAVE_STATE \
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
/* See if already in kernel mode.*/ \
mfs r1, rmsr; \
andi r1, r1, MSR_UMS; \
bnei r1, 1f; \
/* Kernel-mode state save. */ \
/* Reload kernel stack-ptr. */ \
lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
/* FIXME: these two lines could be merged into one */ \
/* tophys(r1,r1); */ \
/* addik r1, r1, -PT_SIZE; */ \
addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
SAVE_REGS \
brid 2f; \
swi r1, r1, PT_MODE; \
1: /* User-mode state save. */ \
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
tophys(r1,r1); \
lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
/* MS: these three instructions could be merged into one */ \
/* addik r1, r1, THREAD_SIZE; */ \
/* tophys(r1,r1); */ \
/* addik r1, r1, -PT_SIZE; */ \
addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
SAVE_REGS \
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
swi r11, r1, PT_R1; /* Store user SP. */ \
swi r0, r1, PT_MODE; /* Was in user-mode. */ \
/* MS: I am clearing UMS even in case when I come from kernel space */ \
clear_ums; \
2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
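
/* Convention used throughout this file: PT_MODE is 0 when the frame was built
 * for an entry from user mode and non-zero when it was built for an entry from
 * kernel mode; the return paths test it to decide whether reschedule/signal
 * work applies before restoring state.
 */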

.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice; it means we don't have to disable interrupts
 * before the state save.
 */
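
/*
 * For illustration only (not assembled here): a minimal user-space sketch of
 * the protocol above, assuming the usual user vector at address 0x8 and a
 * hypothetical syscall number __NR_foo:
 *
 *	addik	r12, r0, __NR_foo	; syscall number in r12
 *	addik	r5, r0, 0		; first argument in r5 (args in r5-r10)
 *	brki	r14, 0x8		; trap; r14 receives the address of the brki
 *					; on return, r3 = result or negative errno
 */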
C_ENTRY(_user_exception):
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
addi r14, r14, 4 /* return address is 4 bytes after the call */

lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
tophys(r1,r1);
lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
/* calculate kernel stack pointer from task struct (THREAD_SIZE is 8k) */
addik r1, r1, THREAD_SIZE;
tophys(r1,r1);

addik r1, r1, -PT_SIZE; /* Make room on the stack. */
SAVE_REGS
swi r0, r1, PT_R3
swi r0, r1, PT_R4

swi r0, r1, PT_MODE; /* Was in user-mode. */
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PT_R1; /* Store user SP. */
clear_ums;
2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
/* Save away the syscall number. */
swi r12, r1, PT_R0;
tovirt(r1,r1)

/* Where the trap should return; needs -8 to adjust for rtsd r15, 8 */
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid. The LP
 * register should point to the location where
 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */

/* Step into virtual mode */
rtbd r0, 3f
nop
3:
lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
lwi r11, r11, TI_FLAGS /* get flags in thread info */
andi r11, r11, _TIF_WORK_SYSCALL_MASK
beqi r11, 4f

addik r3, r0, -ENOSYS
swi r3, r1, PT_R3
brlid r15, do_syscall_trace_enter
addik r5, r1, PT_R0

# do_syscall_trace_enter returns the new syscall nr.
addk r12, r0, r3
lwi r5, r1, PT_R5;
lwi r6, r1, PT_R6;
lwi r7, r1, PT_R7;
lwi r8, r1, PT_R8;
lwi r9, r1, PT_R9;
lwi r10, r1, PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return. [note that MAKE_SYS_CALL uses label 1] */
/* See if the system call number is valid */
blti r12, 5f
addi r11, r12, -__NR_syscalls;
bgei r11, 5f;
/* Figure out which function to use for this system call. */
/* Note: the MicroBlaze barrel shifter is optional, so don't rely on it */
add r12, r12, r12; /* convert num -> ptr */
add r12, r12, r12;
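/* The two adds above multiply the syscall number by four (a shift left by two
 * done without the optional barrel shifter), turning it into a byte offset
 * into the word-sized sys_call_table entries loaded below.
 */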
addi r30, r0, 1 /* restarts allowed */

#ifdef DEBUG
/* Trace syscalls and store them in syscall_debug_table */
/* The first entry holds the total number of syscalls made */
lwi r3, r0, syscall_debug_table
addi r3, r3, 1
swi r3, r0, syscall_debug_table
lwi r3, r12, syscall_debug_table
addi r3, r3, 1
swi r3, r12, syscall_debug_table
#endif

# Find and jump into the syscall handler.
lwi r12, r12, sys_call_table
/* Where the trap should return; needs -8 to adjust for rtsd r15, 8 */
addi r15, r0, ret_from_trap-8
bra r12

/* The syscall number is invalid, return an error. */
5:
braid ret_from_trap
addi r3, r0, -ENOSYS;
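
/* In C terms the dispatch above behaves roughly like (illustration only):
 *
 *	if (nr < 0 || nr >= __NR_syscalls)
 *		ret = -ENOSYS;
 *	else
 *		ret = sys_call_table[nr](a0, a1, a2, a3, a4, a5);
 *
 * with the arguments taken from r5-r10 and the result returned in r3.
 */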

/* Entry point used to return from a syscall/trap */
/* We re-enable the BIP bit before the state restore */
C_ENTRY(ret_from_trap):
swi r3, r1, PT_R3
swi r4, r1, PT_R4

lwi r11, r1, PT_MODE;
/* See if returning to kernel mode, if so, skip resched &c. */
bnei r11, 2f;
/* We're returning to user mode, so check for various conditions that
 * trigger rescheduling. */
/* FIXME: Restructure all these flag checks. */
lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_WORK_SYSCALL_MASK
beqi r11, 1f

brlid r15, do_syscall_trace_leave
addik r5, r1, PT_R0
1:
/* We're returning to user mode, so check for various conditions that
 * trigger rescheduling. */
/* get thread info from current task */
lwi r11, CURRENT_TASK, TS_THREAD_INFO;
lwi r19, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r19, _TIF_NEED_RESCHED;
beqi r11, 5f;

bralid r15, schedule; /* Call scheduler */
nop; /* delay slot */
bri 1b

/* Maybe handle a signal */
5:
andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
beqi r11, 4f; /* No signals to handle, skip straight to the return */

addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
bralid r15, do_notify_resume; /* Handle any signals */
add r6, r30, r0; /* Arg 2: int in_syscall */
add r30, r0, r0 /* no more restarts */
bri 1b

/* Finally, return to user state. */
4: set_bip; /* Ints masked for state restore */
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
RESTORE_REGS_RTBD;
addik r1, r1, PT_SIZE /* Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
bri 6f;

/* Return to kernel state. */
2: set_bip; /* Ints masked for state restore */
VM_OFF;
tophys(r1,r1);
RESTORE_REGS_RTBD;
addik r1, r1, PT_SIZE /* Clean up stack space. */
tovirt(r1,r1);
6:
TRAP_return: /* Make global symbol for debugging */
rtbd r14, 0; /* Instructions to return from a trap */
nop;


/* This is the initial entry point for a new child thread, with an appropriate
   stack in place that makes it look like the child is in the middle of a
   syscall. This function is actually `returned to' from switch_thread
   (copy_thread makes ret_from_fork the return address in each new thread's
   saved context). */
C_ENTRY(ret_from_fork):
bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
add r5, r3, r0; /* switch_thread returns the prev task */
/* ( in the delay slot ) */
brid ret_from_trap; /* Do normal trap return */
add r3, r0, r0; /* Child's fork call should return 0. */

C_ENTRY(ret_from_kernel_thread):
bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
add r5, r3, r0; /* switch_thread returns the prev task */
/* ( in the delay slot ) */
brald r15, r20 /* fn was left in r20 */
addk r5, r0, r19 /* ... and argument - in r19 */
brid ret_from_trap
add r3, r0, r0

C_ENTRY(sys_rt_sigreturn_wrapper):
addik r30, r0, 0 /* no restarts */
brid sys_rt_sigreturn /* Do real work */
addik r5, r1, 0; /* add user context as 1st arg */

/*
 * HW EXCEPTION routine start
 */
C_ENTRY(full_exception_trap):
/* adjust exception address for privileged instruction
 * so we can find where it is */
addik r17, r17, -4
SAVE_STATE /* Save registers */
/* PC, before IRQ/trap - this is one instruction above */
swi r17, r1, PT_PC;
tovirt(r1,r1)
/* FIXME: this could be stored directly in the PT_ESR reg.
 * I tested it but there is a fault */
/* Where the trap should return; needs -8 to adjust for rtsd r15, 8 */
addik r15, r0, ret_from_exc - 8
mfs r6, resr
mfs r7, rfsr; /* save FSR */
mts rfsr, r0; /* Clear sticky fsr */
rted r0, full_exception
addik r5, r1, 0 /* parameter struct pt_regs * regs */

/*
 * Unaligned data trap.
 *
 * Unaligned data trap last on 4k page is handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked. This is nice; it means we don't have to disable interrupts
 * before the state save.
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
/* MS: I have to save the r11 value and then restore it because
 * set_bip, clear_eip, set_ee use r11 as a temp register if MSR
 * instructions are not used. We don't need to do this if MSR instructions
 * are used, since they use r0 instead of r11.
 * I am using ENTRY_SP, which should primarily be used only for stack
 * pointer saving. */
swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
set_bip; /* equalize initial state for all possible entries */
clear_eip;
set_ee;
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
SAVE_STATE /* Save registers.*/
/* PC, before IRQ/trap - this is one instruction above */
swi r17, r1, PT_PC;
tovirt(r1,r1)
/* Where the trap should return; needs -8 to adjust for rtsd r15, 8 */
addik r15, r0, ret_from_exc-8
mfs r3, resr /* ESR */
mfs r4, rear /* EAR */
rtbd r0, _unaligned_data_exception
addik r7, r1, 0 /* parameter struct pt_regs * regs */

/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such a situation.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked. This is nice; it means we don't have to disable interrupts
 * before the state save.
 *
 * Build a standard exception frame for TLB Access errors. All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 * void do_page_fault(struct pt_regs *regs,
 * unsigned long address,
 * unsigned long error_code)
 */
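/* Per the argument convention used in this file (first parameters in r5, r6,
 * r7, ...), both entry points below pass r5 = regs, r6 = address (from rear)
 * and r7 = error_code (from resr, or 0 for instruction faults) to
 * do_page_fault.
 */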
/* Data and instruction traps - which one it was is resolved in fault.c */
C_ENTRY(page_fault_data_trap):
SAVE_STATE /* Save registers.*/
/* PC, before IRQ/trap - this is one instruction above */
swi r17, r1, PT_PC;
tovirt(r1,r1)
/* Where the trap should return; needs -8 to adjust for rtsd r15, 8 */
addik r15, r0, ret_from_exc-8
mfs r6, rear /* parameter unsigned long address */
mfs r7, resr /* parameter unsigned long error_code */
rted r0, do_page_fault
addik r5, r1, 0 /* parameter struct pt_regs * regs */

C_ENTRY(page_fault_instr_trap):
SAVE_STATE /* Save registers.*/
/* PC, before IRQ/trap - this is one instruction above */
swi r17, r1, PT_PC;
tovirt(r1,r1)
/* Where the trap should return; needs -8 to adjust for rtsd r15, 8 */
addik r15, r0, ret_from_exc-8
mfs r6, rear /* parameter unsigned long address */
ori r7, r0, 0 /* parameter unsigned long error_code */
rted r0, do_page_fault
addik r5, r1, 0 /* parameter struct pt_regs * regs */

/* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
lwi r11, r1, PT_MODE;
bnei r11, 2f; /* See if returning to kernel mode, */
/* ... if so, skip resched &c. */

/* We're returning to user mode, so check for various conditions that
   trigger rescheduling. */
1:
lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r19, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r19, _TIF_NEED_RESCHED;
beqi r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */
bralid r15, schedule; /* Call scheduler */
nop; /* delay slot */
bri 1b

/* Maybe handle a signal */
5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
beqi r11, 4f; /* No signals to handle, skip straight to the return */

/*
 * Handle a signal return; Pending signals should be in r18.
 *
 * Not all registers are saved by the normal trap/interrupt entry
 * points (for instance, call-saved registers (because the normal
 * C-compiler calling sequence in the kernel makes sure they're
 * preserved), and call-clobbered registers in the case of
 * traps), but signal handlers may want to examine or change the
 * complete register state. Here we save anything not saved by
 * the normal entry sequence, so that it may be safely restored
 * (in a possibly modified form) after do_notify_resume returns. */
addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
bralid r15, do_notify_resume; /* Handle any signals */
addi r6, r0, 0; /* Arg 2: int in_syscall */
bri 1b

/* Finally, return to user state. */
4: set_bip; /* Ints masked for state restore */
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);

RESTORE_REGS_RTBD;
addik r1, r1, PT_SIZE /* Clean up stack space. */

lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
bri 6f;
/* Return to kernel state. */
2: set_bip; /* Ints masked for state restore */
VM_OFF;
tophys(r1,r1);
RESTORE_REGS_RTBD;
addik r1, r1, PT_SIZE /* Clean up stack space. */

tovirt(r1,r1);
6:
EXC_return: /* Make global symbol for debugging */
rtbd r14, 0; /* Instructions to return from an exception */
nop;

/*
 * HW EXCEPTION routine end
 */

/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
/* MS: See if already in kernel mode. */
mfs r1, rmsr
nop
andi r1, r1, MSR_UMS
bnei r1, 1f

/* Kernel-mode state save. */
lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
/* save registers */
/* MS: Make room on the stack -> activation record */
addik r1, r1, -PT_SIZE;
SAVE_REGS
brid 2f;
swi r1, r1, PT_MODE; /* 0 - user mode, non-zero - kernel mode */
1:
/* User-mode state save. */
/* MS: get the saved current */
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
tophys(r1,r1);
lwi r1, r1, TS_THREAD_INFO;
addik r1, r1, THREAD_SIZE;
tophys(r1,r1);
/* save registers */
addik r1, r1, -PT_SIZE;
SAVE_REGS
/* calculate mode */
swi r0, r1, PT_MODE;
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PT_R1;
clear_ums;
2:
lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
tovirt(r1,r1)
addik r15, r0, irq_call;
irq_call:rtbd r0, do_IRQ;
addik r5, r1, 0;
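
/* Setting r15 to irq_call just before the branch means that when do_IRQ
 * returns with "rtsd r15, 8" it lands eight bytes past irq_call, i.e. at
 * ret_from_irq below; this is the same -8 bias used with ret_from_trap and
 * ret_from_exc above, only expressed with a local label instead of a
 * subtraction.
 */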

/* MS: we are in virtual mode */
ret_from_irq:
lwi r11, r1, PT_MODE;
bnei r11, 2f;

1:
lwi r11, CURRENT_TASK, TS_THREAD_INFO;
lwi r19, r11, TI_FLAGS; /* MS: get flags from thread info */
andi r11, r19, _TIF_NEED_RESCHED;
beqi r11, 5f
bralid r15, schedule;
nop; /* delay slot */
bri 1b

/* Maybe handle a signal */
5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
beqid r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
bralid r15, do_notify_resume; /* Handle any signals */
addi r6, r0, 0; /* Arg 2: int in_syscall */
bri 1b

/* Finally, return to user state. */
no_intr_resched:
/* Disable interrupts, we are now committed to the state restore */
disable_irq
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
VM_OFF;
tophys(r1,r1);
RESTORE_REGS
addik r1, r1, PT_SIZE /* MS: Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE;
bri 6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPTION
lwi r11, CURRENT_TASK, TS_THREAD_INFO;
/* MS: get preempt_count from thread info */
lwi r5, r11, TI_PREEMPT_COUNT;
bgti r5, restore;

lwi r5, r11, TI_FLAGS; /* get flags in thread info */
andi r5, r5, _TIF_NEED_RESCHED;
beqi r5, restore /* if zero jump over */

/* interrupts are off, that's why we call preempt_schedule_irq */
bralid r15, preempt_schedule_irq
nop
restore:
#endif
VM_OFF /* MS: turn off MMU */
tophys(r1,r1)
RESTORE_REGS
addik r1, r1, PT_SIZE /* MS: Clean up stack space. */
tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
rtid r14, 0
nop

/*
 * Debug trap for KGDB. _debug_exception is entered via brki r16, 0x18
 * and calls the handling function with the saved pt_regs.
 */
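/* Note: brki r16, 0x18 links the address of the trapping instruction into
 * r16, which is why the handlers below store r16 as PT_PC (see the
 * "PC and r16 are the same" comment in the kernel-mode path).
 */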
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) C_ENTRY(_debug_exception):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) /* BIP bit is set on entry, no interrupts can occur */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) mfs r1, rmsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) andi r1, r1, MSR_UMS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) bnei r1, 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) /* MS: Kernel-mode state save - kgdb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) /* BIP bit is set on entry, no interrupts can occur */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) SAVE_REGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /* save all regs to pt_reg structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) swi r0, r1, PT_R0; /* R0 must be saved too */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) swi r14, r1, PT_R14 /* rewrite saved R14 value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) swi r16, r1, PT_PC; /* PC and r16 are the same */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) /* save special purpose registers to pt_regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) mfs r11, rear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) swi r11, r1, PT_EAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) mfs r11, resr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) swi r11, r1, PT_ESR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) mfs r11, rfsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) swi r11, r1, PT_FSR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) /* r1 now holds the physical SP, already decreased by PT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)  * reconstruct the original (virtual) R1 value and save it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) addik r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + PT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) swi r11, r1, PT_R1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* MS: r31 - current pointer isn't changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) tovirt(r1,r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) #ifdef CONFIG_KGDB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) addi r5, r1, 0 /* pass pt_regs address as the first arg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) addik r15, r0, dbtrap_call; /* return address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) rtbd r0, microblaze_kgdb_break
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) nop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /* MS: Handler for brki from kernel space when KGDB is OFF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)  * It is very unlikely that another brki instruction is executed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) bri 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) /* MS: User-mode state save - gdb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 1: lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) tophys(r1,r1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) tophys(r1,r1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) addik r1, r1, -PT_SIZE; /* Make room on the stack. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) SAVE_REGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) swi r16, r1, PT_PC; /* Save PC; r16 holds the debug return address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) swi r0, r1, PT_MODE; /* Was in user-mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) swi r11, r1, PT_R1; /* Store user SP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) tovirt(r1,r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) set_vms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) addik r5, r1, 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) addik r15, r0, dbtrap_call;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) rtbd r0, sw_exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /* MS: First instruction of the second (return) part of the gdb/kgdb path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) set_bip; /* Ints masked for state restore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) lwi r11, r1, PT_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) bnei r11, 2f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /* MS: Return to user space - gdb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) /* Get the current task's thread_info into r11 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) lwi r19, r11, TI_FLAGS; /* get flags in thread info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) andi r11, r19, _TIF_NEED_RESCHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) beqi r11, 5f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /* Call the scheduler before returning from a syscall/trap. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) bralid r15, schedule; /* Call scheduler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) nop; /* delay slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) bri 1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) /* Maybe handle a signal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) beqi r11, 4f; /* if no signal is pending, skip handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) bralid r15, do_notify_resume; /* Handle any signals */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) addi r6, r0, 0; /* Arg 2: int in_syscall */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) bri 1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) /* Finally, return to user state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 4: swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) VM_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) tophys(r1,r1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) /* MS: Restore all regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) RESTORE_REGS_RTBD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) addik r1, r1, PT_SIZE /* Clean up stack space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) DBTRAP_return_user: /* MS: Make global symbol for debugging */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) rtbd r16, 0; /* MS: Instructions to return from a debug trap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) nop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /* MS: Return to kernel state - kgdb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 2: VM_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) tophys(r1,r1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /* MS: Restore all regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) RESTORE_REGS_RTBD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) lwi r14, r1, PT_R14;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) lwi r16, r1, PT_PC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) addik r1, r1, PT_SIZE; /* MS: Clean up stack space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) tovirt(r1,r1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) DBTRAP_return_kernel: /* MS: Make global symbol for debugging */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) rtbd r16, 0; /* MS: Instructions to return from a debug trap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) nop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
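/*
 * Context switch.  Register usage, as seen from the code below:
 * r5 = previous task's thread_info, r6 = next task's thread_info,
 * and the outgoing CURRENT_TASK pointer is returned in r3.  Only
 * dedicated and non-volatile registers are kept in cpu_context; the
 * volatile registers were already saved on the stack by the caller.
 */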
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) ENTRY(_switch_to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /* prepare return value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) addk r3, r0, CURRENT_TASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) /* save registers in cpu_context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /* use r11 and r12, volatile registers, as temp registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) /* r11 = start of the previous process's cpu_context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) addik r11, r5, TI_CPU_CONTEXT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) swi r1, r11, CC_R1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) swi r2, r11, CC_R2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) /* skip volatile registers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)  * they were saved on the stack when we jumped to _switch_to() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /* dedicated registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) swi r13, r11, CC_R13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) swi r14, r11, CC_R14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) swi r15, r11, CC_R15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) swi r16, r11, CC_R16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) swi r17, r11, CC_R17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) swi r18, r11, CC_R18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) /* save non-volatile registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) swi r19, r11, CC_R19
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) swi r20, r11, CC_R20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) swi r21, r11, CC_R21
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) swi r22, r11, CC_R22
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) swi r23, r11, CC_R23
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) swi r24, r11, CC_R24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) swi r25, r11, CC_R25
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) swi r26, r11, CC_R26
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) swi r27, r11, CC_R27
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) swi r28, r11, CC_R28
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) swi r29, r11, CC_R29
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) swi r30, r11, CC_R30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /* special purpose registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) mfs r12, rmsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) swi r12, r11, CC_MSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) mfs r12, rear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) swi r12, r11, CC_EAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) mfs r12, resr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) swi r12, r11, CC_ESR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) mfs r12, rfsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) swi r12, r11, CC_FSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /* update r31 (CURRENT_TASK) with the pointer to the task that runs next */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) lwi CURRENT_TASK, r6, TI_TASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) /* store it to CURRENT_SAVE too */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) /* get the new process's cpu_context and restore it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /* r11 = start of the next task's cpu_context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) addik r11, r6, TI_CPU_CONTEXT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) /* non-volatile registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) lwi r30, r11, CC_R30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) lwi r29, r11, CC_R29
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) lwi r28, r11, CC_R28
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) lwi r27, r11, CC_R27
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) lwi r26, r11, CC_R26
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) lwi r25, r11, CC_R25
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) lwi r24, r11, CC_R24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) lwi r23, r11, CC_R23
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) lwi r22, r11, CC_R22
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) lwi r21, r11, CC_R21
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) lwi r20, r11, CC_R20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) lwi r19, r11, CC_R19
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /* dedicated registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) lwi r18, r11, CC_R18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) lwi r17, r11, CC_R17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) lwi r16, r11, CC_R16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) lwi r15, r11, CC_R15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) lwi r14, r11, CC_R14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) lwi r13, r11, CC_R13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /* skip volatile registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) lwi r2, r11, CC_R2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) lwi r1, r11, CC_R1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* special purpose registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) lwi r12, r11, CC_FSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) mts rfsr, r12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) lwi r12, r11, CC_MSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) mts rmsr, r12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
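/* return through the new task's saved r15; from here on we are running
 * on the next task's stack and register context */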
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) rtsd r15, 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) ENTRY(_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) VM_OFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) brai 0; /* Jump to reset vector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) /* These are compiled and loaded into high memory, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * copied into place in mach_early_setup */
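/* Fixed MicroBlaze vector offsets: 0x00 reset, 0x08 user exception
 * (syscall), 0x10 interrupt, 0x18 break (debug trap), 0x20 HW exception */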
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) .section .init.ivt, "ax"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) #if CONFIG_MANUAL_RESET_VECTOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) .org 0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) brai CONFIG_MANUAL_RESET_VECTOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) .org 0x8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) brai TOPHYS(_user_exception); /* syscall handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) .org 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) brai TOPHYS(_interrupt); /* Interrupt handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) .org 0x18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) brai TOPHYS(_debug_exception); /* debug trap handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) .org 0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) brai TOPHYS(_hw_exception_handler); /* HW exception handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) .section .rodata,"a"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) #include "syscall_table.S"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) syscall_table_size=(.-sys_call_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) type_SYSCALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) .ascii "SYSCALL\0"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) type_IRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) .ascii "IRQ\0"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) type_IRQ_PREEMPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) .ascii "IRQ (PREEMPTED)\0"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) type_SYSCALL_PREEMPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) .ascii " SYSCALL (PREEMPTED)\0"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * Trap decoding for stack unwinder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * Tuples are (start addr, end addr, string)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)  * If the return address lies in [start addr, end addr],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)  * the unwinder displays 'string'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) .align 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) .global microblaze_trap_handlers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) microblaze_trap_handlers:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /* Exact matches come first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) /* Fuzzy matches go here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) /* End of table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) .word 0 ; .word 0 ; .word 0
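
/*
 * A minimal sketch (in C) of how a consumer such as the stack unwinder
 * could walk the table above; the struct and function names here are
 * illustrative assumptions, not the kernel's actual unwinder code.
 * The loop stops at the all-zero terminator tuple:
 *
 *	struct trap_range { unsigned long start, end; const char *name; };
 *	extern const struct trap_range microblaze_trap_handlers[];
 *
 *	static const char *decode_trap(unsigned long retaddr)
 *	{
 *		const struct trap_range *t;
 *
 *		for (t = microblaze_trap_handlers; t->start; t++)
 *			if (retaddr >= t->start && retaddr <= t->end)
 *				return t->name;
 *		return NULL;
 *	}
 */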