/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/kernel/entry-armv.S
 *
 * Copyright (C) 1996,1997,1998 Russell King.
 * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 * nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * Low-level vector interface routines
 *
 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 * that causes it to save wrong values... Be aware!
 */

#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_GENERIC_IRQ_MULTI_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>
#include <asm/uaccess-asm.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>
#include <asm/probes.h>

/*
 * Interrupt handling.
 */
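@
@ The irq_handler macro dispatches to the architecture's IRQ handler.
@ With CONFIG_GENERIC_IRQ_MULTI_HANDLER it loads the handle_arch_irq
@ function pointer and calls it with r0 = pt_regs, using badr to arrange
@ for the handler to return to the local label 9997 below; otherwise it
@ falls back to the platform's arch_irq_handler_default.
@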
	.macro	irq_handler
#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	badr	lr, 9997f
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
	.endm

	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

	.section	.entry.text,"ax",%progbits

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
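@
@ SPFIX() marks the stack-alignment fixup: the EABI requires the stack
@ to be 8-byte aligned at call boundaries, so svc_entry below probes the
@ alignment of the pre-exception stack and, when needed, opens an extra
@ 4-byte gap so the saved pt_regs stays 64-bit aligned. On non-EABI (or
@ pre-v5) kernels the fixup assembles to nothing.
@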

	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""   ""    ""        ""
	add	r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

	get_thread_info tsk
	uaccess_entry tsk, r0, r1, r2, \uaccess

	.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endif
	.endm

	.align	5
__dabt_svc:
	svc_entry uaccess=0
	mov	r2, sp
	dabt_helper
 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPTION
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

	svc_exit r5, irq = 1			@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPTION
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	reteq	r8				@ go again
	b	1b
#endif

__und_fault:
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM,
	@ the PC will be pointing at the next instruction and we have
	@ to subtract 4.  Otherwise, it was Thumb and the PC will be
	@ pointing at the second half of the Thumb instruction, so we
	@ have to subtract 2.
	ldr	r2, [r0, #S_PC]
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr
ENDPROC(__und_fault)

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry MAX_STACK_SIZE
#else
	svc_entry
#endif

	mov	r1, #4				@ PC correction to apply
 THUMB(	tst	r5, #PSR_T_BIT		)	@ exception taken in Thumb mode?
 THUMB(	movne	r1, #2			)	@ if so, fix up PC correction
	mov	r0, sp				@ struct pt_regs *regs
	bl	__und_fault

__und_svc_finish:
	get_thread_info tsk
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
__fiq_svc:
	svc_entry trace=0
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * Abort mode handlers
 */

@
@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
@ and reuses the same macros. However in abort mode we must also
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
@
	.align 5
__fiq_abt:
	svc_entry trace=0

 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	r1, lr		@ Save lr_abt
	mrs	r2, spsr	@ Save spsr_abt, abort is now safe
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	stmfd	sp!, {r1 - r2}

	add	r0, sp, #8			@ struct pt_regs *regs
	bl	handle_fiq_as_nmi

	ldmfd	sp!, {r1 - r2}
 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	lr, r1		@ Restore lr_abt, abort is unsafe
	msr	spsr_cxsf, r2	@ Restore spsr_abt
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )

	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_abt)

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so PT_REGS_SIZE should
 * be a multiple of 8 as well (checked below)
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry, trace=1, uaccess=1
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

 ATRAP(	mrc	p15, 0, r7, c1, c0, 0)
 ATRAP(	ldr	r8, .LCcralign)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""   ""    ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

 ATRAP(	ldr	r8, [r8, #0])

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	.if \uaccess
	uaccess_disable ip
	.endif

	@ Enable the alignment trap while in kernel mode
 ATRAP(	teq	r8, r7)
 ATRAP( mcrne	p15, 0, r8, c1, c0, 0)

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

	.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.endif
	.endm

	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry uaccess=0
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry uaccess=0

	mov	r2, r4
	mov	r3, r5

	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	badr	r9, ret_from_exception

	@ IRQs must be enabled before attempting to read the instruction from
	@ user space since that could cause a page/translation fault if the
	@ page table was modified by another CPU.
	enable_irq

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
 ARM_BE8(rev	r0, r0)				@ little endian instruction

	uaccess_disable ip

	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	badr	lr, __und_usr_fault_32
	b	call_fpe

__und_usr_thumb:
	@ Thumb instruction
	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
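	@ In the Thumb-2 encoding, a first halfword of 0xe800 or above
	@ marks a 32-bit instruction; anything below that is a complete
	@ 16-bit instruction. Hence the cmp/blo pair below, and the
	@ second ldrht plus orr to reassemble the full 32-bit opcode.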
2:	ldrht	r5, [r4]
ARM_BE8(rev16	r5, r5)				@ little endian instruction
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_fault_16_pan		@ 16bit undefined instruction
3:	ldrht	r0, [r2]
ARM_BE8(rev16	r0, r0)				@ little endian instruction
	uaccess_disable ip
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16
	badr	lr, __und_usr_fault_32
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
	b	__und_usr_fault_16
#endif
 UNWIND(.fnend)
ENDPROC(__und_usr)

/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .text.fixup, "ax"
	.align	2
4:	str	r4, [sp, #S_PC]			@ retry current instruction
	ret	r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection
/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs enabled, FIQs enabled.
 */
@
@ Fall-through from Thumb-2 __und_usr
@
#ifdef CONFIG_NEON
	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
	get_thread_info r10			@ get current thread
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:	ldr	r5, [r6], #4			@ mask value
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r5, #0				@ end mask?
	beq	1f
	and	r8, r0, r5
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	reteq	lr
	and	r8, r0, #0x00000f00		@ mask out CP number
	mov	r7, #1
	add	r6, r10, r8, lsr #8		@ add used_cp[] array offset first
	strb	r7, [r6, #TI_USED_CP]		@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movscs	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
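	@ Computed jump into the per-coprocessor table below: r8 holds the
	@ CP number shifted left by 8, so "r8, lsr #6" is CP# * 4, one
	@ word-sized table entry per coprocessor. In ARM mode the pc reads
	@ as the current instruction + 8, which is exactly the address of
	@ the CP#0 entry (the nop only pads the slot in between); the
	@ Thumb variant computes the same entry address.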
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsr	r8, r8, #6		)
 THUMB(	add	pc, r8			)
	nop

	ret.w	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	ret.w	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	ret.w	lr				@ CP#4
	ret.w	lr				@ CP#5
	ret.w	lr				@ CP#6
#endif
	ret.w	lr				@ CP#7
	ret.w	lr				@ CP#8
	ret.w	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	ret.w	lr				@ CP#10 (VFP)
	ret.w	lr				@ CP#11 (VFP)
#endif
	ret.w	lr				@ CP#12
	ret.w	lr				@ CP#13
	ret.w	lr				@ CP#14 (Debug)
	ret.w	lr				@ CP#15 (Control)

#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
	.align	2
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	ret	lr
ENDPROC(no_fp)

__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16_pan:
	uaccess_disable ip
__und_usr_fault_16:
	mov	r1, #2
1:	mov	r0, sp
	badr	lr, ret_from_exception
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

	.align	5
__fiq_usr:
	usr_entry trace=0
	kuser_cmpxchg_check
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	get_thread_info tsk
	restore_user_regs fast = 0, offset = 0
 UNWIND(.fnend		)
ENDPROC(__fiq_usr)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
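	@ With CONFIG_STACKPROTECTOR on a !SMP kernel there is a single
	@ global __stack_chk_guard, so the next task's canary is fetched
	@ from its task_struct here and written out below, after the
	@ notifier call but before the new register context is loaded.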
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	.if (TSK_STACK_CANARY > IMM12_MASK)
	add	r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK
	.endif
	ldr	r7, [r7, #TSK_STACK_CANARY & IMM12_MASK]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.rst for formal definitions.
 */
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	ret	\reg
#endif
	.endm

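@
@ kuser_pad first realigns to a 4-byte boundary and then fills the rest
@ of the slot with 0xe7fddef1, an encoding from the architecturally
@ undefined instruction space, so that a stray jump into the padding
@ traps instead of executing silently.
@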
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) .macro kuser_pad, sym, size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) .if (. - \sym) & 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) .rept 4 - (. - \sym) & 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) .byte 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) .endr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) .endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) .rept (\size - (. - \sym)) / 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) .word 0xe7fddef1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) .endr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) #ifdef CONFIG_KUSER_HELPERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) .align 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) .globl __kuser_helper_start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) __kuser_helper_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) */
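/*
 * Illustrative userspace binding, following the convention documented in
 * Documentation/arm/kernel_user_helpers.rst (the address is fixed ABI):
 *
 *	typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
 *					  const int64_t *newval,
 *					  volatile int64_t *ptr);
 *	#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
 *
 * Returns 0 with C set on success, non-zero with C clear on failure;
 * callers retry in a loop. Valid only if __kuser_helper_version >= 5.
 */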
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) __kuser_cmpxchg64: @ 0xffff0f60
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) #if defined(CONFIG_CPU_32v6K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) stmfd sp!, {r4, r5, r6, r7}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) ldrd r4, r5, [r0] @ load old val
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) ldrd r6, r7, [r1] @ load new val
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) smp_dmb arm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 1: ldrexd r0, r1, [r2] @ load current val
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) eors r3, r0, r4 @ compare with oldval (1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) eorseq r3, r1, r5 @ compare with oldval (2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) strexdeq r3, r6, r7, [r2] @ store newval if eq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) teqeq r3, #1 @ success?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) beq 1b @ if not, retry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) smp_dmb arm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) rsbs r0, r3, #0 @ set returned val and C flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) ldmfd sp!, {r4, r5, r6, r7}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) usr_ret lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) #elif !defined(CONFIG_SMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) #ifdef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * The only thing that can break atomicity in this cmpxchg64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * implementation is either an IRQ or a data abort exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * causing another process/thread to be scheduled in the middle of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * the critical sequence. The same fixup strategy as for cmpxchg is used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) stmfd sp!, {r4, r5, r6, lr}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) ldmia r0, {r4, r5} @ load old val
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) ldmia r1, {r6, lr} @ load new val
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 1: ldmia r2, {r0, r1} @ load current val
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) eors r3, r0, r4 @ compare with oldval (1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) eorseq r3, r1, r5 @ compare with oldval (2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 2: stmiaeq r2, {r6, lr} @ store newval if eq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) rsbs r0, r3, #0 @ set return val and C flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) ldmfd sp!, {r4, r5, r6, pc}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) .text
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) kuser_cmpxchg64_fixup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) @ Called from kuser_cmpxchg_check macro.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) @ r4 = address of interrupted insn (must be preserved).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) @ sp = saved regs. r7 and r8 are clobbered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) @ 1b = first critical insn, 2b = last critical insn.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) mov r7, #0xffff0fff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) sub r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) subs r8, r4, r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) rsbscs r8, r8, #(2b - 1b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) strcs r7, [sp, #S_PC]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) #if __LINUX_ARM_ARCH__ < 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) bcc kuser_cmpxchg32_fixup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) ret lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) .previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) #warning "NPTL on non-MMU needs fixing"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) mov r0, #-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) adds r0, r0, #0 @ -1 + 0 leaves C clear: report failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) usr_ret lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) #error "incoherent kernel configuration"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) kuser_pad __kuser_cmpxchg64, 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) __kuser_memory_barrier: @ 0xffff0fa0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) smp_dmb arm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) usr_ret lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) kuser_pad __kuser_memory_barrier, 32
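/*
 * Illustrative userspace binding, following the convention documented in
 * Documentation/arm/kernel_user_helpers.rst (fixed ABI address, helper
 * #3, valid only if __kuser_helper_version >= 3):
 *
 *	typedef void (__kuser_dmb_t)(void);
 *	#define __kuser_dmb (*(__kuser_dmb_t *)0xffff0fa0)
 */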
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) __kuser_cmpxchg: @ 0xffff0fc0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) #if __LINUX_ARM_ARCH__ < 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) #ifdef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * The only thing that can break atomicity in this cmpxchg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * implementation is either an IRQ or a data abort exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * causing another process/thread to be scheduled in the middle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * of the critical sequence. To prevent this, code is added to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * the IRQ and data abort exception handlers to set the pc back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * to the beginning of the critical section if it is found to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * within that critical section (see kuser_cmpxchg_fixup).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) 1: ldr r3, [r2] @ load current val
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) subs r3, r3, r0 @ compare with oldval
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) 2: streq r1, [r2] @ store newval if eq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) rsbs r0, r3, #0 @ set return val and C flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) usr_ret lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) .text
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) kuser_cmpxchg32_fixup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) @ Called from kuser_cmpxchg_check macro.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) @ r4 = address of interrupted insn (must be preserved).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) @ sp = saved regs. r7 and r8 are clobbered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) @ 1b = first critical insn, 2b = last critical insn.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) mov r7, #0xffff0fff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) subs r8, r4, r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) rsbscs r8, r8, #(2b - 1b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) strcs r7, [sp, #S_PC]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) ret lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) .previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) #warning "NPTL on non-MMU needs fixing"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) mov r0, #-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) adds r0, r0, #0 @ -1 + 0 leaves C clear: report failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) usr_ret lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) smp_dmb arm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) 1: ldrex r3, [r2]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) subs r3, r3, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) strexeq r3, r1, [r2]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) teqeq r3, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) beq 1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) rsbs r0, r3, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /* beware -- each __kuser slot must be 8 instructions max */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) ALT_SMP(b __kuser_memory_barrier)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) ALT_UP(usr_ret lr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) kuser_pad __kuser_cmpxchg, 32
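/*
 * Illustrative userspace binding and retry loop, following the
 * convention documented in Documentation/arm/kernel_user_helpers.rst
 * (fixed ABI address, helper #2, valid if __kuser_helper_version >= 2):
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *					volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 *	static int atomic_add(volatile int *ptr, int val)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = *ptr;
 *			new = old + val;
 *		} while (__kuser_cmpxchg(old, new, ptr));	// 0 = stored
 *
 *		return new;
 *	}
 */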
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) __kuser_get_tls: @ 0xffff0fe0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) usr_ret lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) kuser_pad __kuser_get_tls, 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) .rep 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) .word 0 @ 0xffff0ff0 software TLS value, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) .endr @ pad up to __kuser_helper_version
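/*
 * Illustrative userspace binding, following the convention documented in
 * Documentation/arm/kernel_user_helpers.rst (fixed ABI address, helper
 * #1, valid only if __kuser_helper_version >= 1):
 *
 *	typedef void * (__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 */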
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) __kuser_helper_version: @ 0xffff0ffc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) .word ((__kuser_helper_end - __kuser_helper_start) >> 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) .globl __kuser_helper_end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) __kuser_helper_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) THUMB( .thumb )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * Vector stubs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * This code is copied to 0xffff1000 so we can use branches in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * vectors, rather than ldrs. Note that this code must not exceed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * a page size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * Common stub entry macro:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * SP points to a minimal amount of processor-private memory, the address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * of which is copied into r0 for the mode-specific abort handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) */
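/*
 * Example: with the stubs at 0xffff1000, a vector entry such as
 * "W(b) vector_irq" at 0xffff0018 is a short forward branch, well
 * within the +/-32MB reach of an ARM branch. Only the SWI entry must
 * load pc instead, since vector_swi lives in the kernel's .text (see
 * the .vectors section below).
 */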
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) .macro vector_stub, name, mode, correction=0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) .align 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) vector_\name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) .if \correction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) sub lr, lr, #\correction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) .endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) @ Save r0, lr_<exception> (parent PC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) stmia sp, {r0, lr} @ save r0, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) @ Save spsr_<exception> (parent CPSR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 2: mrs lr, spsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) str lr, [sp, #8] @ save spsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) @
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) @ Prepare for SVC32 mode. IRQs remain disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) @
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) mrs r0, cpsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) eor r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) msr spsr_cxsf, r0
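	@ The CPU is currently in \mode, so eor'ing cpsr with
	@ (\mode ^ SVC_MODE) turns the mode field into SVC_MODE while
	@ preserving everything else; PSR_ISETSTATE likewise flips the
	@ Thumb bit when the kernel is built Thumb-2. The "movs pc, lr"
	@ below restores this value to cpsr, switching mode and ISA in
	@ one step.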
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) @
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) @ the branch table must immediately follow this code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) @
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) and lr, lr, #0x0f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) THUMB( adr r0, 1f )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) THUMB( ldr lr, [r0, lr, lsl #2] )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) mov r0, sp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) ARM( ldr lr, [pc, lr, lsl #2] )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) movs pc, lr @ branch to handler in SVC mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) ENDPROC(vector_\name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) #ifdef CONFIG_HARDEN_BRANCH_HISTORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) .subsection 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) .align 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) vector_bhb_loop8_\name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) .if \correction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) sub lr, lr, #\correction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) .endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) @ Save r0, lr_<exception> (parent PC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) stmia sp, {r0, lr}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) @ bhb workaround
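	@ Eight discarded taken branches overwrite any user-controlled
	@ branch history (Spectre-BHB mitigation on CPUs where a fixed
	@ loop count is sufficient); the dsb/isb pair ensures the loop
	@ has taken effect before dispatch resumes at 2b.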
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) mov r0, #8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 3: b . + 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) subs r0, r0, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) bne 3b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) dsb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) b 2b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) ENDPROC(vector_bhb_loop8_\name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) vector_bhb_bpiall_\name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) .if \correction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) sub lr, lr, #\correction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) .endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) @ Save r0, lr_<exception> (parent PC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) stmia sp, {r0, lr}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) @ bhb workaround
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) mcr p15, 0, r0, c7, c5, 6 @ BPIALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) @ isb not needed: the "movs pc, lr" in the vector stub
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) @ already acts as a context synchronisation event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) b 2b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) ENDPROC(vector_bhb_bpiall_\name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) .previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) .align 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) @ handler addresses follow this label
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) .section .stubs, "ax", %progbits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) @ This must be the first word
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) .word vector_swi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) #ifdef CONFIG_HARDEN_BRANCH_HISTORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) .word vector_bhb_loop8_swi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) .word vector_bhb_bpiall_swi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) vector_rst:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) ARM( swi SYS_ERROR0 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) THUMB( svc #0 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) THUMB( nop )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) b vector_und
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) * Interrupt dispatcher
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) vector_stub irq, IRQ_MODE, 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
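@ Each vector_stub invocation is followed by 16 handler addresses,
@ indexed by the parent mode (spsr & 0x0f); in this table only USR (0)
@ and SVC (3) are expected, anything else lands in __irq_invalid.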
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) .long __irq_usr @ 0 (USR_26 / USR_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) .long __irq_invalid @ 1 (FIQ_26 / FIQ_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) .long __irq_invalid @ 2 (IRQ_26 / IRQ_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) .long __irq_svc @ 3 (SVC_26 / SVC_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) .long __irq_invalid @ 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) .long __irq_invalid @ 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) .long __irq_invalid @ 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) .long __irq_invalid @ 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) .long __irq_invalid @ 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) .long __irq_invalid @ 9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) .long __irq_invalid @ a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) .long __irq_invalid @ b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) .long __irq_invalid @ c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) .long __irq_invalid @ d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) .long __irq_invalid @ e
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) .long __irq_invalid @ f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * Data abort dispatcher
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) vector_stub dabt, ABT_MODE, 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) .long __dabt_usr @ 0 (USR_26 / USR_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) .long __dabt_invalid @ 1 (FIQ_26 / FIQ_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) .long __dabt_invalid @ 2 (IRQ_26 / IRQ_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) .long __dabt_svc @ 3 (SVC_26 / SVC_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) .long __dabt_invalid @ 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) .long __dabt_invalid @ 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) .long __dabt_invalid @ 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) .long __dabt_invalid @ 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) .long __dabt_invalid @ 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) .long __dabt_invalid @ 9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) .long __dabt_invalid @ a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) .long __dabt_invalid @ b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) .long __dabt_invalid @ c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) .long __dabt_invalid @ d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) .long __dabt_invalid @ e
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) .long __dabt_invalid @ f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * Prefetch abort dispatcher
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) vector_stub pabt, ABT_MODE, 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) .long __pabt_usr @ 0 (USR_26 / USR_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) .long __pabt_invalid @ 1 (FIQ_26 / FIQ_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) .long __pabt_invalid @ 2 (IRQ_26 / IRQ_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) .long __pabt_svc @ 3 (SVC_26 / SVC_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) .long __pabt_invalid @ 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) .long __pabt_invalid @ 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) .long __pabt_invalid @ 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) .long __pabt_invalid @ 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) .long __pabt_invalid @ 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) .long __pabt_invalid @ 9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) .long __pabt_invalid @ a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) .long __pabt_invalid @ b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) .long __pabt_invalid @ c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) .long __pabt_invalid @ d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) .long __pabt_invalid @ e
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) .long __pabt_invalid @ f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * Undef instr entry dispatcher
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) vector_stub und, UND_MODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) .long __und_usr @ 0 (USR_26 / USR_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) .long __und_invalid @ 1 (FIQ_26 / FIQ_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) .long __und_invalid @ 2 (IRQ_26 / IRQ_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) .long __und_svc @ 3 (SVC_26 / SVC_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) .long __und_invalid @ 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) .long __und_invalid @ 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) .long __und_invalid @ 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) .long __und_invalid @ 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) .long __und_invalid @ 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) .long __und_invalid @ 9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) .long __und_invalid @ a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) .long __und_invalid @ b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) .long __und_invalid @ c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) .long __und_invalid @ d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) .long __und_invalid @ e
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) .long __und_invalid @ f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) .align 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /*=============================================================================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * Address exception handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) *-----------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * These aren't too critical: they're not supposed to happen, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * won't happen in 32-bit data mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) vector_addrexcptn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) b vector_addrexcptn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) /*=============================================================================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) * FIQ "NMI" handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) *-----------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) * Handle a FIQ using the SVC stack, allowing FIQ to act like the NMI on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * x86 systems. This must be the last vector stub, so let's place it in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * its own subsection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) .subsection 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) vector_stub fiq, FIQ_MODE, 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) .long __fiq_usr @ 0 (USR_26 / USR_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) .long __fiq_svc @ 1 (FIQ_26 / FIQ_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) .long __fiq_svc @ 2 (IRQ_26 / IRQ_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) .long __fiq_svc @ 3 (SVC_26 / SVC_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) .long __fiq_svc @ 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) .long __fiq_svc @ 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) .long __fiq_svc @ 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) .long __fiq_abt @ 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) .long __fiq_svc @ 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) .long __fiq_svc @ 9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) .long __fiq_svc @ a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) .long __fiq_svc @ b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) .long __fiq_svc @ c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) .long __fiq_svc @ d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) .long __fiq_svc @ e
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) .long __fiq_svc @ f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) .globl vector_fiq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) .section .vectors, "ax", %progbits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) .L__vectors_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) W(b) vector_rst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) W(b) vector_und
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) W(ldr) pc, .L__vectors_start + 0x1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) W(b) vector_pabt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) W(b) vector_dabt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) W(b) vector_addrexcptn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) W(b) vector_irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) W(b) vector_fiq
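/*
 * The SWI entry above cannot use a branch: vector_swi lives in the
 * kernel's regular .text, beyond the +/-32MB branch range from the
 * vector page. It instead loads pc from .L__vectors_start + 0x1000,
 * i.e. the first word of the .stubs section (and, at +0x1004/+0x1008,
 * the BHB-hardened SWI variants used by the tables below).
 */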
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) #ifdef CONFIG_HARDEN_BRANCH_HISTORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) .section .vectors.bhb.loop8, "ax", %progbits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) .L__vectors_bhb_loop8_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) W(b) vector_rst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) W(b) vector_bhb_loop8_und
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) W(ldr) pc, .L__vectors_bhb_loop8_start + 0x1004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) W(b) vector_bhb_loop8_pabt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) W(b) vector_bhb_loop8_dabt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) W(b) vector_addrexcptn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) W(b) vector_bhb_loop8_irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) W(b) vector_bhb_loop8_fiq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) .section .vectors.bhb.bpiall, "ax", %progbits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) .L__vectors_bhb_bpiall_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) W(b) vector_rst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) W(b) vector_bhb_bpiall_und
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) W(ldr) pc, .L__vectors_bhb_bpiall_start + 0x1008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) W(b) vector_bhb_bpiall_pabt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) W(b) vector_bhb_bpiall_dabt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) W(b) vector_addrexcptn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) W(b) vector_bhb_bpiall_irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) W(b) vector_bhb_bpiall_fiq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) .data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) .align 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
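@ Storage for the CP15 control register value recorded during early
@ boot; code that later tweaks the control register (e.g. alignment
@ fault handling) is expected to keep this copy in sync.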
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) .globl cr_alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) cr_alignment:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) .space 4