Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

arch/arm/mm/cache-v4wb.S (all lines from commit 8f3ce5b39, kx, 2023-10-28):

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4wb.S
 *
 *  Copyright (C) 1997-2002 Russell King
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The total size of the data cache.
 */
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE	16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE	8192
#else
# error Unknown cache size
#endif

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 *   Size   Clean (ticks, x3)  Dirty (ticks, x3)
 *   4096   21  20  21    53  55  54
 *   8192   40  41  40   106 100 102
 *  16384   77  77  76   140 140 138
 *  32768  150 149 150   214 216 212 <---
 *  65536  296 297 296   351 358 361
 * 131072  591 591 591   656 657 651
 *  Whole  132 136 132   221 217 207 <---
 */
#define CACHE_DLIMIT	(CACHE_DSIZE * 4)
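
/*
 * With the sizes above this evaluates to 65536 bytes on SA-110
 * (4 * 16384) and 32768 bytes on SA1100 (4 * 8192); ranges of at
 * least CACHE_DLIMIT bytes take the whole-cache path instead of the
 * per-line loops.
 */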

	.data
	.align	2
flush_base:
	.long	FLUSH_BASE
	.text

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wb_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(v4wb_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wb_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wb_flush_kern_cache_all)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
__flush_whole_cache:
	ldr	r3, =flush_base
	ldr	r1, [r3, #0]
	eor	r1, r1, #CACHE_DSIZE
	str	r1, [r3, #0]
	add	r2, r1, #CACHE_DSIZE
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#ifdef FLUSH_BASE_MINICACHE
	add	r2, r2, #FLUSH_BASE_MINICACHE - FLUSH_BASE
	sub	r1, r2, #512			@ only 512 bytes
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
	ret	lr
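
/*
 * The loops above clean the D cache by reading dummy data: on this
 * read-allocate writeback cache, loading CACHE_DSIZE bytes from the
 * dedicated flush area evicts, and thereby writes back, every dirty
 * line.  flush_base is toggled by CACHE_DSIZE on each call so that
 * successive flushes use addresses that are not already resident.
 * Roughly, in C:
 *
 *	base = (flush_base ^= CACHE_DSIZE);
 *	for (p = base; p < base + CACHE_DSIZE; p += 32)
 *		(void)*(volatile unsigned long *)p;	// load evicts a line
 */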

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4wb_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	tst	r2, #VM_EXEC			@ executable region?
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache

	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
	bhs	__flush_whole_cache		@ flush whole D cache

1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	ret	lr
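
/*
 * Note the two paths above: ranges below CACHE_DLIMIT are cleaned and
 * invalidated one line at a time, while larger ranges branch straight
 * into __flush_whole_cache, whose ret then returns to the original
 * caller.  The I cache is only touched for executable (VM_EXEC)
 * regions.
 */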

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wb_flush_kern_dcache_area)
	add	r1, r0, r1
	/* fall through */

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_coherent_kern_range)
	/* fall through */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
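
/*
 * Typical use (illustrative only, not taken from this file): after
 * the kernel writes instructions into memory, e.g. when setting up a
 * signal trampoline or patching a breakpoint, the new code must be
 * pushed out of the D cache and stale lines purged from the I cache
 * before it is executed:
 *
 *	memcpy(insns, new_code, len);
 *	v4wb_coherent_user_range((unsigned long)insns,
 *				 (unsigned long)insns + len);
 */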

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wb_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr
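
/*
 * The tst/mcrne pairs above implement the "must be written back" rule
 * from the header: a cache line only partially covered by the range
 * may also hold unrelated data, so such edge lines are cleaned before
 * the invalidate loop discards them.
 */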

/*
 *	dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wb_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	This is actually the same as v4wb_coherent_kern_range()
 */
	.globl	v4wb_dma_flush_range
	.set	v4wb_dma_flush_range, v4wb_coherent_kern_range

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wb_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	v4wb_dma_clean_range
	bcs	v4wb_dma_inv_range
	b	v4wb_dma_flush_range
ENDPROC(v4wb_dma_map_area)
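
/*
 * The dispatch above relies on the numeric values of
 * enum dma_data_direction (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1,
 * DMA_FROM_DEVICE = 2).  Equivalent C:
 *
 *	if (dir == DMA_TO_DEVICE)		// beq
 *		v4wb_dma_clean_range(start, end);
 *	else if (dir > DMA_TO_DEVICE)		// bcs: DMA_FROM_DEVICE
 *		v4wb_dma_inv_range(start, end);
 *	else					// DMA_BIDIRECTIONAL
 *		v4wb_dma_flush_range(start, end);
 */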

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wb_dma_unmap_area)
	ret	lr
ENDPROC(v4wb_dma_unmap_area)
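
/*
 * dma_unmap_area is deliberately a no-op here: these cores do not
 * speculatively fetch, so no stale D cache lines can appear over the
 * buffer while the device owns it, and all required maintenance was
 * already done in v4wb_dma_map_area().
 */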

	.globl	v4wb_flush_kern_cache_louis
	.equ	v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all
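
/*
 * flush_kern_cache_louis normally flushes only to the Level of
 * Unification Inner Shareable; with a single cache level there is
 * nothing smaller than the whole cache, hence the alias.
 */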

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4wb
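
/*
 * define_cache_functions emits the per-CPU method table consumed by
 * the generic ARM cache code.  A sketch of the result, assuming the
 * struct cpu_cache_fns layout from <asm/cacheflush.h>:
 *
 *	struct cpu_cache_fns v4wb_cache_fns = {
 *		.flush_icache_all	= v4wb_flush_icache_all,
 *		.flush_kern_all		= v4wb_flush_kern_cache_all,
 *		.flush_kern_louis	= v4wb_flush_kern_cache_louis,
 *		.flush_user_all		= v4wb_flush_user_cache_all,
 *		.flush_user_range	= v4wb_flush_user_cache_range,
 *		.coherent_kern_range	= v4wb_coherent_kern_range,
 *		.coherent_user_range	= v4wb_coherent_user_range,
 *		.flush_kern_dcache_area	= v4wb_flush_kern_dcache_area,
 *		.dma_map_area		= v4wb_dma_map_area,
 *		.dma_unmap_area		= v4wb_dma_unmap_area,
 *		.dma_flush_range	= v4wb_dma_flush_range,
 *	};
 */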