^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * arch/arm/kernel/crunch-bits.S
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Cirrus MaverickCrunch context switching and handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Copyright (c) 2003-2004, MontaVista Software, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/linkage.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <asm/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <asm/thread_info.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <asm/asm-offsets.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <asm/assembler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <mach/ep93xx-regs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * We can't use hex constants here due to a bug in gas.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #define CRUNCH_MVDX0 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #define CRUNCH_MVDX1 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #define CRUNCH_MVDX2 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #define CRUNCH_MVDX3 24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #define CRUNCH_MVDX4 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #define CRUNCH_MVDX5 40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #define CRUNCH_MVDX6 48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define CRUNCH_MVDX7 56
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define CRUNCH_MVDX8 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define CRUNCH_MVDX9 72
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define CRUNCH_MVDX10 80
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define CRUNCH_MVDX11 88
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define CRUNCH_MVDX12 96
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define CRUNCH_MVDX13 104
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define CRUNCH_MVDX14 112
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define CRUNCH_MVDX15 120
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define CRUNCH_MVAX0L 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define CRUNCH_MVAX0M 132
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define CRUNCH_MVAX0H 136
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define CRUNCH_MVAX1L 140
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define CRUNCH_MVAX1M 144
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define CRUNCH_MVAX1H 148
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define CRUNCH_MVAX2L 152
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define CRUNCH_MVAX2M 156
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define CRUNCH_MVAX2H 160
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define CRUNCH_MVAX3L 164
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define CRUNCH_MVAX3M 168
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define CRUNCH_MVAX3H 172
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define CRUNCH_DSPSC 176
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define CRUNCH_SIZE 184
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) .text
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) * Lazy switching of crunch coprocessor context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) * r10 = struct thread_info pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * r9 = ret_from_exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * lr = undefined instr exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) * called from prefetch exception handler with interrupts enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) ENTRY(crunch_task_enable)
@ Entered from the undef-instruction path when a task uses a Crunch insn
@ while coprocessor access is disabled: r10 = thread_info,
@ r9 = ret_from_exception, lr = undef-instr exit; interrupts are enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) 	inc_preempt_count r10, r3	@ no preemption while we switch owner (r3 scratch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) 	ldr	r8, =(EP93XX_APB_VIRT_BASE + 0x00130000)	@ syscon addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) 	ldr	r1, [r8, #0x80]		@ syscon DeviceCfg register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) 	tst	r1, #0x00800000		@ access to crunch enabled?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) 	bne	2f			@ if so no business here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) 	mov	r3, #0xaa		@ unlock syscon swlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) 	str	r3, [r8, #0xc0]		@ SwLock reg: next syscon write goes through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) 	orr	r1, r1, #0x00800000	@ enable access to crunch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) 	str	r1, [r8, #0x80]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) 	ldr	r3, =crunch_owner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) 	add	r0, r10, #TI_CRUNCH_STATE	@ get task crunch save area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) 	ldr	r2, [sp, #60]		@ current task pc value
@ NOTE(review): 60 is presumably the pt_regs ARM_pc slot (S_PC) — confirm
@ against asm-offsets before any change to the exception frame layout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) 	ldr	r1, [r3]		@ get current crunch owner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) 	str	r0, [r3]		@ this task now owns crunch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) 	sub	r2, r2, #4		@ adjust pc back so faulting insn is retried
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) 	str	r2, [sp, #60]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) 	ldr	r2, [r8, #0x80]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) 	mov	r2, r2			@ flush out enable (@@@)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) 	teq	r1, #0			@ test for last ownership
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) 	mov	lr, r9			@ normal exit from exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) 	beq	crunch_load		@ no owner, skip save
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) 
@ crunch_save: dump full Crunch state to [r1] (previous owner's save area,
@ or a caller-supplied buffer when entered via bl from the helpers below).
@ In: r1 = destination save area; r0 = area to load afterwards, or 0 to
@ save only. Clobbers mvfx0/mvdx0 (mvdx0 is reloaded on the r0 == 0 path).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) crunch_save:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) 	cfstr64	mvdx0, [r1, #CRUNCH_MVDX0]	@ save 64b registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) 	cfstr64	mvdx1, [r1, #CRUNCH_MVDX1]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) 	cfstr64	mvdx2, [r1, #CRUNCH_MVDX2]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) 	cfstr64	mvdx3, [r1, #CRUNCH_MVDX3]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) 	cfstr64	mvdx4, [r1, #CRUNCH_MVDX4]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) 	cfstr64	mvdx5, [r1, #CRUNCH_MVDX5]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	cfstr64	mvdx6, [r1, #CRUNCH_MVDX6]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	cfstr64	mvdx7, [r1, #CRUNCH_MVDX7]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	cfstr64	mvdx8, [r1, #CRUNCH_MVDX8]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	cfstr64	mvdx9, [r1, #CRUNCH_MVDX9]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	cfstr64	mvdx10, [r1, #CRUNCH_MVDX10]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 	cfstr64	mvdx11, [r1, #CRUNCH_MVDX11]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	cfstr64	mvdx12, [r1, #CRUNCH_MVDX12]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	cfstr64	mvdx13, [r1, #CRUNCH_MVDX13]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	cfstr64	mvdx14, [r1, #CRUNCH_MVDX14]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	cfstr64	mvdx15, [r1, #CRUNCH_MVDX15]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) #ifdef __ARMEB__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) #error fix me for ARMEB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 
@ The 72-bit accumulators have no direct store insn: move each third
@ (low/mid/high) through mvfx0 and store it as a 32-bit word.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	cfmv32al	mvfx0, mvax0	@ save 72b accumulators
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	cfstr32	mvfx0, [r1, #CRUNCH_MVAX0L]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	cfmv32am	mvfx0, mvax0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	cfstr32	mvfx0, [r1, #CRUNCH_MVAX0M]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	cfmv32ah	mvfx0, mvax0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	cfstr32	mvfx0, [r1, #CRUNCH_MVAX0H]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	cfmv32al	mvfx0, mvax1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	cfstr32	mvfx0, [r1, #CRUNCH_MVAX1L]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	cfmv32am	mvfx0, mvax1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	cfstr32	mvfx0, [r1, #CRUNCH_MVAX1M]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	cfmv32ah	mvfx0, mvax1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	cfstr32	mvfx0, [r1, #CRUNCH_MVAX1H]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	cfmv32al	mvfx0, mvax2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	cfstr32	mvfx0, [r1, #CRUNCH_MVAX2L]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	cfmv32am	mvfx0, mvax2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	cfstr32	mvfx0, [r1, #CRUNCH_MVAX2M]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	cfmv32ah	mvfx0, mvax2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	cfstr32	mvfx0, [r1, #CRUNCH_MVAX2H]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	cfmv32al	mvfx0, mvax3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	cfstr32	mvfx0, [r1, #CRUNCH_MVAX3L]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	cfmv32am	mvfx0, mvax3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	cfstr32	mvfx0, [r1, #CRUNCH_MVAX3M]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	cfmv32ah	mvfx0, mvax3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	cfstr32	mvfx0, [r1, #CRUNCH_MVAX3H]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	cfmv32sc	mvdx0, dspsc		@ save status word
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	cfstr64	mvdx0, [r1, #CRUNCH_DSPSC]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	teq	r0, #0			@ anything to load?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	cfldr64eq	mvdx0, [r1, #CRUNCH_MVDX0] @ mvdx0 was clobbered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	beq	1f
@ NOTE(review): the shared 1f/2f exit below runs dec_preempt_count even
@ when crunch_save/crunch_load are entered via bl from crunch_task_disable
@ /copy/restore (no matching inc on those paths), and it uses r3 as
@ scratch and may clobber r10 — verify balance under CONFIG_PREEMPT_COUNT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 
@ crunch_load: load full Crunch state from the save area at [r0].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) crunch_load:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	cfldr64	mvdx0, [r0, #CRUNCH_DSPSC]	@ load status word
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	cfmvsc32	dspsc, mvdx0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 
@ Rebuild the 72-bit accumulators a third at a time through mvfx0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	cfldr32	mvfx0, [r0, #CRUNCH_MVAX0L]	@ load 72b accumulators
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	cfmval32	mvax0, mvfx0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	cfldr32	mvfx0, [r0, #CRUNCH_MVAX0M]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	cfmvam32	mvax0, mvfx0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	cfldr32	mvfx0, [r0, #CRUNCH_MVAX0H]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	cfmvah32	mvax0, mvfx0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	cfldr32	mvfx0, [r0, #CRUNCH_MVAX1L]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	cfmval32	mvax1, mvfx0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	cfldr32	mvfx0, [r0, #CRUNCH_MVAX1M]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	cfmvam32	mvax1, mvfx0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	cfldr32	mvfx0, [r0, #CRUNCH_MVAX1H]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	cfmvah32	mvax1, mvfx0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	cfldr32	mvfx0, [r0, #CRUNCH_MVAX2L]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	cfmval32	mvax2, mvfx0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	cfldr32	mvfx0, [r0, #CRUNCH_MVAX2M]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	cfmvam32	mvax2, mvfx0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	cfldr32	mvfx0, [r0, #CRUNCH_MVAX2H]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	cfmvah32	mvax2, mvfx0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	cfldr32	mvfx0, [r0, #CRUNCH_MVAX3L]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	cfmval32	mvax3, mvfx0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 	cfldr32	mvfx0, [r0, #CRUNCH_MVAX3M]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	cfmvam32	mvax3, mvfx0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	cfldr32	mvfx0, [r0, #CRUNCH_MVAX3H]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	cfmvah32	mvax3, mvfx0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 
@ mvdx0 is loaded last of all data regs so it is free as scratch above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	cfldr64	mvdx0, [r0, #CRUNCH_MVDX0]	@ load 64b registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	cfldr64	mvdx1, [r0, #CRUNCH_MVDX1]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 	cfldr64	mvdx2, [r0, #CRUNCH_MVDX2]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	cfldr64	mvdx3, [r0, #CRUNCH_MVDX3]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	cfldr64	mvdx4, [r0, #CRUNCH_MVDX4]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	cfldr64	mvdx5, [r0, #CRUNCH_MVDX5]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	cfldr64	mvdx6, [r0, #CRUNCH_MVDX6]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 	cfldr64	mvdx7, [r0, #CRUNCH_MVDX7]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 	cfldr64	mvdx8, [r0, #CRUNCH_MVDX8]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 	cfldr64	mvdx9, [r0, #CRUNCH_MVDX9]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 	cfldr64	mvdx10, [r0, #CRUNCH_MVDX10]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	cfldr64	mvdx11, [r0, #CRUNCH_MVDX11]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 	cfldr64	mvdx12, [r0, #CRUNCH_MVDX12]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	cfldr64	mvdx13, [r0, #CRUNCH_MVDX13]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 	cfldr64	mvdx14, [r0, #CRUNCH_MVDX14]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 	cfldr64	mvdx15, [r0, #CRUNCH_MVDX15]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 
@ Common exit: drop the preempt count taken at entry and return via lr
@ (r9/ret_from_exception on the trap path, the bl return address otherwise).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) #ifdef CONFIG_PREEMPT_COUNT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	get_thread_info r10	@ r10 may no longer be valid after preemption points
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 2:	dec_preempt_count r10, r3	@ r3 used as scratch here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) * Back up crunch regs to save area and disable access to them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) * (mainly for gdb or sleep mode usage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) * r0 = struct thread_info pointer of target task or NULL for any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) ENTRY(crunch_task_disable)
@ Flush the owner's Crunch state to its save area and turn coprocessor
@ access off. r0 = thread_info of the target task, or NULL for "whichever
@ task currently owns crunch". Runs with interrupts masked throughout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 	stmfd	sp!, {r4, r5, lr}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 	mrs	ip, cpsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	orr	r2, ip, #PSR_I_BIT	@ disable interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	msr	cpsr_c, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	ldr	r4, =(EP93XX_APB_VIRT_BASE + 0x00130000)	@ syscon addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	ldr	r3, =crunch_owner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	ldr	r1, [r3]		@ get current crunch owner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	teq	r1, #0			@ any current owner?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 	beq	1f			@ no: quit
@ Proceed when r0 == NULL (match any owner: teqne is skipped with Z set)
@ or when r2, the target task's save area, is the recorded owner.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	teq	r0, #0			@ any owner?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	teqne	r1, r2			@ or specified one?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	bne	1f			@ no: quit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 
@ Access must be enabled before the cfstr insns in crunch_save can run.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	ldr	r5, [r4, #0x80]		@ enable access to crunch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	mov	r2, #0xaa		@ unlock syscon swlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 	str	r2, [r4, #0xc0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 	orr	r5, r5, #0x00800000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 	str	r5, [r4, #0x80]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 	mov	r0, #0			@ nothing to load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	str	r0, [r3]		@ no more current owner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	ldr	r2, [r4, #0x80]		@ flush out enable (@@@)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	mov	r2, r2
@ r1 still points at the owner's save area: crunch_save dumps state there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	bl	crunch_save
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	mov	r2, #0xaa		@ disable access to crunch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 	str	r2, [r4, #0xc0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 	bic	r5, r5, #0x00800000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	str	r5, [r4, #0x80]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	ldr	r5, [r4, #0x80]		@ flush out enable (@@@)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	mov	r5, r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 1:	msr	cpsr_c, ip		@ restore interrupt mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	ldmfd	sp!, {r4, r5, pc}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) * Copy crunch state to given memory address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) * r0 = struct thread_info pointer of target task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) * r1 = memory address where to store crunch state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) * this is called mainly in the creation of signal stack frames
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) ENTRY(crunch_task_copy)
@ Copy a task's Crunch state into a caller-supplied buffer.
@ r0 = thread_info of target task, r1 = destination buffer (CRUNCH_SIZE
@ bytes). Used mainly when building signal stack frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 	mrs	ip, cpsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 	orr	r2, ip, #PSR_I_BIT	@ disable interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 	msr	cpsr_c, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 	ldr	r3, =crunch_owner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 	ldr	r3, [r3]		@ get current crunch owner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 	teq	r2, r3			@ does this task own it...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 	beq	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 	@ current crunch values are in the task save area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	msr	cpsr_c, ip		@ restore interrupt mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 	mov	r0, r1			@ memcpy(buffer, save area, CRUNCH_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 	mov	r1, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 	mov	r2, #CRUNCH_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 	b	memcpy			@ tail call; memcpy returns to our caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 1:	@ this task owns crunch regs -- grab a copy from there
@ crunch_save stores directly through r1, which still holds the
@ caller-supplied destination buffer, so no memcpy is needed here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 	mov	r0, #0			@ nothing to load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 	mov	r3, lr			@ preserve return address
@ NOTE(review): under CONFIG_PREEMPT_COUNT crunch_save's shared exit runs
@ dec_preempt_count using r3 as scratch and reloads r10 — confirm r3
@ actually survives to the ret below in that configuration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 	bl	crunch_save
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 	msr	cpsr_c, ip		@ restore interrupt mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 	ret	r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) * Restore crunch state from given memory address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) * r0 = struct thread_info pointer of target task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) * r1 = memory address where to get crunch state from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) * this is used to restore crunch state when unwinding a signal stack frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) ENTRY(crunch_task_restore)
@ Restore a task's Crunch state from a caller-supplied buffer.
@ r0 = thread_info of target task, r1 = source buffer (CRUNCH_SIZE bytes).
@ Used when unwinding a signal stack frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 	mrs	ip, cpsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 	orr	r2, ip, #PSR_I_BIT	@ disable interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 	msr	cpsr_c, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 	ldr	r3, =crunch_owner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 	ldr	r3, [r3]		@ get current crunch owner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 	teq	r2, r3			@ does this task own it...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 	beq	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 	@ this task doesn't own crunch regs -- use its save area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 	msr	cpsr_c, ip		@ restore interrupt mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	mov	r0, r2			@ memcpy(save area, buffer, CRUNCH_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 	mov	r2, #CRUNCH_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 	b	memcpy			@ tail call; memcpy returns to our caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 1:	@ this task owns crunch regs -- load them directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 	mov	r0, r1			@ crunch_load reads from [r0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 	mov	r1, #0			@ nothing to save
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 	mov	r3, lr			@ preserve return address
@ NOTE(review): under CONFIG_PREEMPT_COUNT crunch_load's shared exit runs
@ dec_preempt_count using r3 as scratch and reloads r10 — confirm r3
@ actually survives to the ret below in that configuration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 	bl	crunch_load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 	msr	cpsr_c, ip		@ restore interrupt mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 	ret	r3