Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  *  linux/arch/arm/mm/cache-v7m.S
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *  Based on linux/arch/arm/mm/cache-v7.S
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *  Copyright (C) 2001 Deep Blue Solutions Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *  Copyright (C) 2005 ARM Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  *  This is the "shell" of the ARMv7M processor support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/linkage.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <asm/assembler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <asm/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <asm/unwind.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <asm/v7m.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include "proc-macros.S"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) /* Generic V7M read/write macros for memory mapped cache operations */
@ v7m_cache_read: load the 32-bit SCB register at BASEADDR_V7M_SCB + \reg
@ into \rt.  \rt doubles as address scratch, so its old value is lost.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) .macro v7m_cache_read, rt, reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 	movw	\rt, #:lower16:BASEADDR_V7M_SCB + \reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 	movt	\rt, #:upper16:BASEADDR_V7M_SCB + \reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 	ldr     \rt, [\rt]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 
@ v7m_cacheop: store \rt to the memory-mapped cache-operation register at
@ BASEADDR_V7M_SCB + \op, using \tmp as address scratch.  \c is an optional
@ condition code (default "al") applied to all three instructions; note that
@ none of movw/movt/str alter the flags, so the condition stays live.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) .macro v7m_cacheop, rt, tmp, op, c = al
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 	movw\c	\tmp, #:lower16:BASEADDR_V7M_SCB + \op
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 	movt\c	\tmp, #:upper16:BASEADDR_V7M_SCB + \op
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 	str\c	\rt, [\tmp]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 
@ read_ccsidr: read the Cache Size ID Register (for the cache currently
@ selected via CSSELR) into \rt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) .macro	read_ccsidr, rt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 	v7m_cache_read \rt, V7M_SCB_CCSIDR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 
@ read_clidr: read the Cache Level ID Register into \rt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) .macro read_clidr, rt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 	v7m_cache_read \rt, V7M_SCB_CLIDR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 
@ write_csselr: write \rt to the Cache Size Selection Register, choosing
@ which cache the next CCSIDR read reports on.  \tmp is address scratch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) .macro	write_csselr, rt, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	v7m_cacheop \rt, \tmp, V7M_SCB_CSSELR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48)  * dcisw: Invalidate data cache by set/way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49)  */
@ \rt holds the encoded set/way value; \tmp is address scratch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) .macro dcisw, rt, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	v7m_cacheop \rt, \tmp, V7M_SCB_DCISW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55)  * dccisw: Clean and invalidate data cache by set/way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56)  */
@ \rt holds the encoded set/way value; \tmp is address scratch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) .macro dccisw, rt, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	v7m_cacheop \rt, \tmp, V7M_SCB_DCCISW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62)  * dccimvac: Clean and invalidate data cache line by MVA to PoC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63)  */
@ The .irp generates an unconditional dccimvac plus one conditional variant
@ per condition code (dccimvaceq, dccimvacne, dccimvaclo, ...), mirroring
@ the conditional forms a cp15 MCR would allow on v7-A/R.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) .irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) .macro dccimvac\c, rt, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	v7m_cacheop \rt, \tmp, V7M_SCB_DCCIMVAC, \c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) .endr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71)  * dcimvac: Invalidate data cache line by MVA to PoC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72)  */
@ As with dccimvac above, the .irp generates a conditional variant of the
@ macro for every ARM condition code (dcimvaclo is used by v7m_dma_inv_range).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) .irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) .macro dcimvac\c, rt, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) .endr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80)  * dccmvau: Clean data cache line by MVA to PoU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81)  */
@ \rt holds the virtual address; \tmp is address scratch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) .macro dccmvau, rt, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87)  * dccmvac: Clean data cache line by MVA to PoC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88)  */
@ \rt holds the virtual address; \tmp is address scratch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) .macro dccmvac,  rt, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94)  * icimvau: Invalidate instruction caches by MVA to PoU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95)  */
@ \rt holds the virtual address; \tmp is address scratch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) .macro icimvau, rt, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	v7m_cacheop \rt, \tmp, V7M_SCB_ICIMVAU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)  * Invalidate the icache, inner shareable if SMP, invalidate BTB for UP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)  * rt data ignored by ICIALLU(IS), so can be used for the address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) .macro invalidate_icache, rt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 	v7m_cacheop \rt, \rt, V7M_SCB_ICIALLU
@ leave 0 in \rt (v7m_flush_icache_all documents "r0 - set to 0" on return)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	mov \rt, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)  * Invalidate the BTB, inner shareable if SMP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)  * rt data ignored by BPIALL, so it can be used for the address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) .macro invalidate_bp, rt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	v7m_cacheop \rt, \rt, V7M_SCB_BPIALL
@ leave 0 in \rt, matching invalidate_icache above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	mov \rt, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
/*
 *	v7m_invalidate_l1()
 *
 *	Invalidate (without cleaning) the whole level-0 data/unified cache
 *	by set/way.  The geometry is read from CCSIDR after selecting the
 *	cache in CSSELR.
 *
 *	Corrupted registers: r0-r6
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) ENTRY(v7m_invalidate_l1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	mov	r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 
@ select cache level 0 (data/unified) in CSSELR, then read its CCSIDR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	write_csselr r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	read_ccsidr r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 
@ r2 = NumSets - 1 (CCSIDR bits [27:13])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	movw	r1, #0x7fff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	and	r2, r1, r0, lsr #13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	movw	r1, #0x3ff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	and	r3, r1, r0, lsr #3      @ NumWays - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	add	r2, r2, #1              @ NumSets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 
@ r0 = log2(line size in bytes) = (CCSIDR LineSize field) + 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	and	r0, r0, #0x7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	add	r0, r0, #4      @ SetShift
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 
@ WayShift = number of leading zeros of (NumWays - 1): ways live in the
@ top bits of the set/way operand
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	clz	r1, r3          @ WayShift
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	add	r4, r3, #1      @ NumWays
@ outer loop over sets, inner loop over ways
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 1:	sub	r2, r2, #1      @ NumSets--
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	mov	r3, r4          @ Temp = NumWays
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 2:	subs	r3, r3, #1      @ Temp--
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	mov	r5, r3, lsl r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	mov	r6, r2, lsl r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	orr	r5, r5, r6      @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	dcisw	r5, r6
@ flags from the "subs" at 2: are still valid here — the intervening
@ mov/orr/dcisw (movw/movt/str) do not write the flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	bgt	2b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	cmp	r2, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	bgt	1b
@ complete all invalidates, then synchronise the instruction stream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	dsb	st
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) ENDPROC(v7m_invalidate_l1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)  *	v7m_flush_icache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)  *	Flush the whole I-cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)  *	Registers:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)  *	r0 - set to 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) ENTRY(v7m_flush_icache_all)
@ ICIALLU via the SCB; the macro zeroes r0 afterwards
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	invalidate_icache r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) ENDPROC(v7m_flush_icache_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)  *	v7m_flush_dcache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)  *	Flush the whole D-cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)  *	Corrupted registers: r0-r7, r9-r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) ENTRY(v7m_flush_dcache_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	dmb					@ ensure ordering with previous memory accesses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	read_clidr r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	mov	r3, r0, lsr #23			@ move LoC into position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	ands	r3, r3, #7 << 1			@ extract LoC*2 from clidr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	beq	finished			@ if loc is 0, then no need to clean
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) start_flush_levels:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	mov	r10, #0				@ start clean at cache level 0
@ r10 = current cache level * 2 (the CSSELR level encoding); r3 = LoC * 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) flush_levels:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 	and	r1, r1, #7			@ mask of the bits for current cache only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 	cmp	r1, #2				@ see what cache we have at this level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 	blt	skip				@ skip if no cache, or just i-cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) #ifdef CONFIG_PREEMPTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	save_and_disable_irqs_notrace r9	@ make cssr&csidr read atomic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	write_csselr r10, r1			@ set current cache level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 	isb					@ isb to sync the new cssr&csidr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 	read_ccsidr r1				@ read the new csidr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) #ifdef CONFIG_PREEMPTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 	restore_irqs_notrace r9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	and	r2, r1, #7			@ extract the length of the cache lines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 	add	r2, r2, #4			@ add 4 (line length offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 	movw	r4, #0x3ff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 	ands	r4, r4, r1, lsr #3		@ find maximum number on the way size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 	clz	r5, r4				@ find bit position of way size increment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 	movw	r7, #0x7fff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
@ outer loop over ways (r4 counts down), inner loop over sets (r9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) loop1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	mov	r9, r7				@ create working copy of max index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) loop2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	lsl	r6, r4, r5			@ r6 = way << way-position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 	orr	r11, r10, r6			@ factor way and cache number into r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 	lsl	r6, r9, r2			@ r6 = index << set-shift
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	orr	r11, r11, r6			@ factor index number into r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 	dccisw	r11, r6				@ clean/invalidate by set/way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	subs	r9, r9, #1			@ decrement the index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	bge	loop2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	subs	r4, r4, #1			@ decrement the way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	bge	loop1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) skip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	add	r10, r10, #2			@ increment cache number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	cmp	r3, r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	bgt	flush_levels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) finished:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 	mov	r10, #0				@ switch back to cache level 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	write_csselr r10, r3			@ select current cache level in cssr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	dsb	st
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) ENDPROC(v7m_flush_dcache_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)  *	v7m_flush_kern_cache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)  *	Flush the entire cache system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)  *  The data cache flush is now achieved using atomic clean / invalidates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)  *  working outwards from L1 cache. This is done using Set/Way based cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)  *  maintenance instructions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)  *  The instruction cache can still be invalidated back to the point of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)  *  unification in a single instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) ENTRY(v7m_flush_kern_cache_all)
@ v7m_flush_dcache_all corrupts r0-r7, r9-r11, so preserve the callee-saved
@ subset (and lr for the nested call) around it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 	stmfd	sp!, {r4-r7, r9-r11, lr}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	bl	v7m_flush_dcache_all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	invalidate_icache r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	ldmfd	sp!, {r4-r7, r9-r11, lr}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) ENDPROC(v7m_flush_kern_cache_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)  *	v7m_flush_user_cache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)  *	Flush all cache entries in a particular address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)  *	- mm    - mm_struct describing address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) ENTRY(v7m_flush_user_cache_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	/*FALLTHROUGH*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)  *	v7m_flush_user_cache_range(start, end, flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)  *	Flush a range of cache entries in the specified address space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)  *	- start - start address (may not be aligned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)  *	- end   - end address (exclusive, may not be aligned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)  *	- flags	- vm_area_struct flags describing address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)  *	It is assumed that:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)  *	- we have a VIPT cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) ENTRY(v7m_flush_user_cache_range)
@ no-op: both user-cache entry points simply return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) ENDPROC(v7m_flush_user_cache_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) ENDPROC(v7m_flush_user_cache_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)  *	v7m_coherent_kern_range(start,end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)  *	Ensure that the I and D caches are coherent within specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)  *	region.  This is typically used when code has been written to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)  *	a memory region, and will be executed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)  *	- start   - virtual start address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)  *	- end     - virtual end address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)  *	It is assumed that:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)  *	- the Icache does not read data from the write buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) ENTRY(v7m_coherent_kern_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 	/* FALLTHROUGH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)  *	v7m_coherent_user_range(start,end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)  *	Ensure that the I and D caches are coherent within specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)  *	region.  This is typically used when code has been written to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)  *	a memory region, and will be executed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)  *	- start   - virtual start address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296)  *	- end     - virtual end address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)  *	It is assumed that:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)  *	- the Icache does not read data from the write buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) ENTRY(v7m_coherent_user_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)  UNWIND(.fnstart		)
@ r2 = D-cache line size, r3 = line mask, r12 = line-aligned cursor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	dcache_line_size r2, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 	sub	r3, r2, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 	bic	r12, r0, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)  * We use open coded version of dccmvau otherwise USER() would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)  * point at movw instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)  */
@ NOTE(review): the comment above looks historical — the dccmvau macro is
@ used here and no USER()/fault fixup is present in this v7-M version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 	dccmvau	r12, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	add	r12, r12, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 	cmp	r12, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 	blo	1b
@ barrier: complete the D-cache cleans before invalidating the I-cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 	dsb	ishst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 	icache_line_size r2, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 	sub	r3, r2, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 	bic	r12, r0, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 	icimvau r12, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 	add	r12, r12, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 	cmp	r12, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 	blo	2b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 	invalidate_bp r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 	dsb	ishst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 	isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)  UNWIND(.fnend		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) ENDPROC(v7m_coherent_kern_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) ENDPROC(v7m_coherent_user_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)  *	v7m_flush_kern_dcache_area(void *addr, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)  *	Ensure that the data held in the page kaddr is written back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)  *	to the page in question.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)  *	- addr	- kernel address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339)  *	- size	- region size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) ENTRY(v7m_flush_kern_dcache_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 	dcache_line_size r2, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 	add	r1, r0, r1		@ r1 = end = addr + size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	sub	r3, r2, #1		@ r3 = line mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 	bic	r0, r0, r3		@ align start down to a cache line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 	dccimvac r0, r3		@ clean & invalidate D line / unified line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 	add	r0, r0, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 	cmp	r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	blo	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 	dsb	st
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) ENDPROC(v7m_flush_kern_dcache_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)  *	v7m_dma_inv_range(start,end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)  *	Invalidate the data cache within the specified region; we will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)  *	be performing a DMA operation in this region and we want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)  *	purge old data in the cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)  *	- start   - virtual start address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)  *	- end     - virtual end address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) v7m_dma_inv_range:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 	dcache_line_size r2, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 	sub	r3, r2, #1
@ if the start is not line-aligned, clean & invalidate the partial first
@ line so that bytes outside the range are not discarded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 	tst	r0, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 	bic	r0, r0, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 	dccimvacne r0, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 	addne	r0, r0, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 	subne	r3, r2, #1	@ restore r3, corrupted by v7m's dccimvac
@ likewise clean & invalidate a partial last line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 	tst	r1, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 	bic	r1, r1, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 	dccimvacne r1, r3
@ the cmp below primes the "lo" condition consumed inside the loop;
@ dcimvaclo expands to conditional movw/movt/str which leave flags intact
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 	cmp	r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 	dcimvaclo r0, r3	@ invalidate whole lines in the interior
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 	addlo	r0, r0, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 	cmplo	r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 	blo	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 	dsb	st
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) ENDPROC(v7m_dma_inv_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)  *	v7m_dma_clean_range(start,end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)  *	- start   - virtual start address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)  *	- end     - virtual end address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) v7m_dma_clean_range:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 	dcache_line_size r2, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 	sub	r3, r2, #1		@ r3 = line mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 	bic	r0, r0, r3		@ align start down to a cache line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 	dccmvac r0, r3			@ clean D / U line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 	add	r0, r0, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 	cmp	r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 	blo	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 	dsb	st
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) ENDPROC(v7m_dma_clean_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)  *	v7m_dma_flush_range(start,end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)  *	- start   - virtual start address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)  *	- end     - virtual end address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) ENTRY(v7m_dma_flush_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 	dcache_line_size r2, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 	sub	r3, r2, #1		@ r3 = line mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 	bic	r0, r0, r3		@ align start down to a cache line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 	dccimvac r0, r3			 @ clean & invalidate D / U line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 	add	r0, r0, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 	cmp	r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 	blo	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 	dsb	st
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) ENDPROC(v7m_dma_flush_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423)  *	dma_map_area(start, size, dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)  *	- start	- kernel virtual start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)  *	- size	- size of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)  *	- dir	- DMA direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) ENTRY(v7m_dma_map_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 	add	r1, r1, r0		@ r1 = end = start + size
@ DMA_FROM_DEVICE: invalidate only; any other direction: clean
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 	teq	r2, #DMA_FROM_DEVICE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 	beq	v7m_dma_inv_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 	b	v7m_dma_clean_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) ENDPROC(v7m_dma_map_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)  *	dma_unmap_area(start, size, dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)  *	- start	- kernel virtual start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)  *	- size	- size of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)  *	- dir	- DMA direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) ENTRY(v7m_dma_unmap_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 	add	r1, r1, r0		@ r1 = end = start + size
@ DMA_TO_DEVICE: nothing to do on unmap; otherwise invalidate the range
@ so the CPU observes data written by the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 	teq	r2, #DMA_TO_DEVICE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 	bne	v7m_dma_inv_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) ENDPROC(v7m_dma_unmap_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 
@ No separate Level-of-Unification variant is implemented here: the
@ "louis" entry point is simply an alias for the full flush.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 	.globl	v7m_flush_kern_cache_louis
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 	.equ	v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 	__INITDATA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 	define_cache_functions v7m