Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  *  linux/arch/arm/mm/proc-xscale.S
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *  Author:	Nicolas Pitre
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *  Created:	November 2000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *  Copyright:	(C) 2000, 2001 MontaVista Software Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * MMU functions for the Intel XScale CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * 2001 Aug 21:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  *	some contributions by Brett Gaines <brett.w.gaines@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  *	Copyright 2001 by Intel Corp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * 2001 Sep 08:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  *	Completely revisited, many important fixes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  *	Nicolas Pitre <nico@fluxnic.net>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/linkage.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/pgtable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <asm/assembler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <asm/hwcap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <asm/pgtable-hwdef.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include <asm/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #include "proc-macros.S"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  * This is the maximum size of an area which will be flushed.  If the area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32)  * is larger than this, then we flush the whole cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #define MAX_AREA_SIZE	32768
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37)  * the cache line size of the I and D cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) #define CACHELINESIZE	32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42)  * the size of the data cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) #define CACHESIZE	32768
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47)  * Virtual address used to allocate the cache when flushed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49)  * This must be an address range which is _never_ used.  It should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50)  * apparently have a mapping in the corresponding page table for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51)  * compatibility with future CPUs that _could_ require it.  For instance we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52)  * don't care.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54)  * This must be aligned on a 2*CACHESIZE boundary.  The code selects one of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55)  * the 2 areas in alternance each time the clean_d_cache macro is used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56)  * Without this the XScale core exhibits cache eviction problems and no one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57)  * knows why.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59)  * Reminder: the vector table is located at 0xffff0000-0xffff0fff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) #define CLEAN_ADDR	0xfffe0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64)  * This macro is used to wait for a CP15 write and is needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65)  * when we have to ensure that the last operation to the co-pro
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66)  * was completed before continuing with operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	.macro	cpwait, rd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	mov	\rd, \rd			@ wait for completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 	sub 	pc, pc, #4			@ flush instruction pipeline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 	.macro	cpwait_ret, lr, rd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 						@ flush instruction pipeline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81)  * This macro cleans the entire dcache using line allocate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82)  * The main loop has been unrolled to reduce loop overhead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83)  * rd and rs are two scratch registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	.macro  clean_d_cache, rd, rs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	ldr	\rs, =clean_addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	ldr	\rd, [\rs]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	eor	\rd, \rd, #CACHESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	str	\rd, [\rs]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	add	\rs, \rd, #CACHESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 1:	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	add	\rd, \rd, #CACHELINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	add	\rd, \rd, #CACHELINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 	add	\rd, \rd, #CACHELINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 	add	\rd, \rd, #CACHELINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	teq	\rd, \rs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	bne	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	.data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	.align	2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) clean_addr:	.word	CLEAN_ADDR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	.text
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)  * cpu_xscale_proc_init()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)  * Nothing too exciting at the moment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) ENTRY(cpu_xscale_proc_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	@ enable write buffer coalescing. Some bootloader disable it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	mrc	p15, 0, r1, c1, c0, 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	bic	r1, r1, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	mcr	p15, 0, r1, c1, c0, 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)  * cpu_xscale_proc_fin()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) ENTRY(cpu_xscale_proc_fin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	bic	r0, r0, #0x1800			@ ...IZ...........
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	bic	r0, r0, #0x0006			@ .............CA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)  * cpu_xscale_reset(loc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)  * Perform a soft reset of the system.  Put the CPU into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)  * same state as it would be if it had been reset, and branch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)  * to what would be the reset vector.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)  * loc: location to jump to for soft reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)  * Beware PXA270 erratum E7.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	.align	5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	.pushsection	.idmap.text, "ax"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) ENTRY(cpu_xscale_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	msr	cpsr_c, r1			@ reset CPSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	mcr	p15, 0, r1, c10, c4, 1		@ unlock I-TLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	mcr	p15, 0, r1, c8, c5, 0		@ invalidate I-TLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	bic	r1, r1, #0x0086			@ ........B....CA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	bic	r1, r1, #0x3900			@ ..VIZ..S........
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	sub	pc, pc, #4			@ flush pipeline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	@ *** cache line aligned ***
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	bic	r1, r1, #0x0001			@ ...............M
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	@ CAUTION: MMU turned off from this point. We count on the pipeline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	@ already containing those two last instructions to survive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	ret	r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) ENDPROC(cpu_xscale_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	.popsection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)  * cpu_xscale_do_idle()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)  * Cause the processor to idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)  * For now we do nothing but go to idle mode for every case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)  * XScale supports clock switching, but using idle mode support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)  * allows external hardware to react to system state changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	.align	5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) ENTRY(cpu_xscale_do_idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 	mov	r0, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) /* ================================= CACHE ================================ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)  *	flush_icache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)  *	Unconditionally clean and invalidate the entire icache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) ENTRY(xscale_flush_icache_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 	mov	r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) ENDPROC(xscale_flush_icache_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)  *	flush_user_cache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)  *	Invalidate all cache entries in a particular address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)  *	space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) ENTRY(xscale_flush_user_cache_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 	/* FALLTHROUGH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)  *	flush_kern_cache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)  *	Clean and invalidate the entire cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) ENTRY(xscale_flush_kern_cache_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	mov	r2, #VM_EXEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	mov	ip, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) __flush_whole_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	clean_d_cache r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	tst	r2, #VM_EXEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)  *	flush_user_cache_range(start, end, vm_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)  *	Invalidate a range of cache entries in the specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)  *	address space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)  *	- start - start address (may not be aligned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)  *	- end	- end address (exclusive, may not be aligned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)  *	- vma	- vma_area_struct describing address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	.align	5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) ENTRY(xscale_flush_user_cache_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	mov	ip, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	sub	r3, r1, r0			@ calculate total size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	cmp	r3, #MAX_AREA_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	bhs	__flush_whole_cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 1:	tst	r2, #VM_EXEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	mcr	p15, 0, r0, c7, c6, 1		@ Invalidate D cache line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	add	r0, r0, #CACHELINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	cmp	r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	blo	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	tst	r2, #VM_EXEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)  *	coherent_kern_range(start, end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)  *	Ensure coherency between the Icache and the Dcache in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)  *	region described by start.  If you have non-snooping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)  *	Harvard caches, you need to implement this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)  *	- start  - virtual start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)  *	- end	 - virtual end address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)  *	Note: single I-cache line invalidation isn't used here since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)  *	it also trashes the mini I-cache used by JTAG debuggers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) ENTRY(xscale_coherent_kern_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 	bic	r0, r0, #CACHELINESIZE - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	add	r0, r0, #CACHELINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 	cmp	r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	blo	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 	mov	r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)  *	coherent_user_range(start, end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)  *	Ensure coherency between the Icache and the Dcache in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)  *	region described by start.  If you have non-snooping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)  *	Harvard caches, you need to implement this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)  *	- start  - virtual start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)  *	- end	 - virtual end address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) ENTRY(xscale_coherent_user_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 	bic	r0, r0, #CACHELINESIZE - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 	add	r0, r0, #CACHELINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	cmp	r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 	blo	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 	mov	r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 	mcr	p15, 0, r0, c7, c5, 6		@ Invalidate BTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)  *	flush_kern_dcache_area(void *addr, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297)  *	Ensure no D cache aliasing occurs, either with itself or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)  *	the I cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)  *	- addr	- kernel address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)  *	- size	- region size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) ENTRY(xscale_flush_kern_dcache_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 	add	r1, r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 	add	r0, r0, #CACHELINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 	cmp	r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 	blo	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 	mov	r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)  *	dma_inv_range(start, end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)  *	Invalidate (discard) the specified virtual address range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)  *	May not write back any entries.  If 'start' or 'end'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)  *	are not cache line aligned, those lines must be written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)  *	back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)  *	- start  - virtual start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)  *	- end	 - virtual end address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) xscale_dma_inv_range:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 	tst	r0, #CACHELINESIZE - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	bic	r0, r0, #CACHELINESIZE - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	tst	r1, #CACHELINESIZE - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 	add	r0, r0, #CACHELINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 	cmp	r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 	blo	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)  *	dma_clean_range(start, end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)  *	Clean the specified virtual address range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)  *	- start  - virtual start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)  *	- end	 - virtual end address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) xscale_dma_clean_range:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 	bic	r0, r0, #CACHELINESIZE - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	add	r0, r0, #CACHELINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 	cmp	r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 	blo	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)  *	dma_flush_range(start, end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)  *	Clean and invalidate the specified virtual address range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)  *	- start  - virtual start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)  *	- end	 - virtual end address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) ENTRY(xscale_dma_flush_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 	bic	r0, r0, #CACHELINESIZE - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 	add	r0, r0, #CACHELINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 	cmp	r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 	blo	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)  *	dma_map_area(start, size, dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)  *	- start	- kernel virtual start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)  *	- size	- size of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)  *	- dir	- DMA direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) ENTRY(xscale_dma_map_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 	add	r1, r1, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 	cmp	r2, #DMA_TO_DEVICE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 	beq	xscale_dma_clean_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 	bcs	xscale_dma_inv_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 	b	xscale_dma_flush_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) ENDPROC(xscale_dma_map_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)  *	dma_map_area(start, size, dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)  *	- start	- kernel virtual start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)  *	- size	- size of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)  *	- dir	- DMA direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) ENTRY(xscale_80200_A0_A1_dma_map_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 	add	r1, r1, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 	teq	r2, #DMA_TO_DEVICE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 	beq	xscale_dma_clean_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 	b	xscale_dma_flush_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) ENDPROC(xscale_80200_A0_A1_dma_map_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)  *	dma_unmap_area(start, size, dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)  *	- start	- kernel virtual start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)  *	- size	- size of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)  *	- dir	- DMA direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) ENTRY(xscale_dma_unmap_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) ENDPROC(xscale_dma_unmap_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 	.globl	xscale_flush_kern_cache_louis
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 	.equ	xscale_flush_kern_cache_louis, xscale_flush_kern_cache_all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 	define_cache_functions xscale
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)  * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)  * clear the dirty bits, which means that if we invalidate a dirty line,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)  * the dirty data can still be written back to external memory later on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)  * The recommended workaround is to always do a clean D-cache line before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423)  * doing an invalidate D-cache line, so on the affected processors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)  * dma_inv_range() is implemented as dma_flush_range().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)  * See erratum #25 of "Intel 80200 Processor Specification Update",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427)  * revision January 22, 2003, available at:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)  *     http://www.intel.com/design/iio/specupdt/273415.htm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) .macro a0_alias basename
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 	.globl xscale_80200_A0_A1_\basename
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 	.type xscale_80200_A0_A1_\basename , %function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 	.equ xscale_80200_A0_A1_\basename , xscale_\basename
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)  * Most of the cache functions are unchanged for these processor revisions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)  * Export suitable alias symbols for the unchanged functions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 	a0_alias flush_icache_all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 	a0_alias flush_user_cache_all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 	a0_alias flush_kern_cache_all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 	a0_alias flush_kern_cache_louis
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 	a0_alias flush_user_cache_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 	a0_alias coherent_kern_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 	a0_alias coherent_user_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 	a0_alias flush_kern_dcache_area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 	a0_alias dma_flush_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 	a0_alias dma_unmap_area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 	define_cache_functions xscale_80200_A0_A1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) ENTRY(cpu_xscale_dcache_clean_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 	add	r0, r0, #CACHELINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 	subs	r1, r1, #CACHELINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	bhi	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) /* =============================== PageTable ============================== */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)  * cpu_xscale_switch_mm(pgd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)  * Set the translation base pointer to be as described by pgd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)  * pgd: new page tables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 	.align	5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) ENTRY(cpu_xscale_switch_mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 	clean_d_cache r1, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 	cpwait_ret lr, ip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)  * cpu_xscale_set_pte_ext(ptep, pte, ext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)  * Set a PTE and flush it out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)  * Errata 40: must set memory to write-through for user read-only pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) cpu_xscale_mt_table:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 	.long	0x00						@ L_PTE_MT_UNCACHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 	.long	PTE_BUFFERABLE					@ L_PTE_MT_BUFFERABLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 	.long	PTE_CACHEABLE					@ L_PTE_MT_WRITETHROUGH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 	.long	PTE_EXT_TEX(1) | PTE_BUFFERABLE			@ L_PTE_MT_DEV_SHARED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	.long	0x00						@ unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE			@ L_PTE_MT_MINICACHE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 	.long	0x00						@ unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 	.long	PTE_BUFFERABLE					@ L_PTE_MT_DEV_WC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 	.long	0x00						@ unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 	.long	0x00						@ L_PTE_MT_DEV_NONSHARED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 	.long	0x00						@ unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 	.long	0x00						@ unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 	.long	0x00						@ unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 	.align	5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) ENTRY(cpu_xscale_set_pte_ext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 	xscale_set_pte_ext_prologue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 	@
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 	@ Erratum 40: must set memory to write-through for user read-only pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 	@
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	and	ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_RDONLY) & ~(4 << 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	teq	ip, #L_PTE_MT_WRITEBACK | L_PTE_USER | L_PTE_RDONLY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	moveq	r1, #L_PTE_MT_WRITETHROUGH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 	and	r1, r1, #L_PTE_MT_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 	adr	ip, cpu_xscale_mt_table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 	ldr	ip, [ip, r1]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 	bic	r2, r2, #0x0c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 	orr	r2, r2, ip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 	xscale_set_pte_ext_epilogue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	.ltorg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 	.align
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) .globl	cpu_xscale_suspend_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) .equ	cpu_xscale_suspend_size, 4 * 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) #ifdef CONFIG_ARM_CPU_SUSPEND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) ENTRY(cpu_xscale_do_suspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 	stmfd	sp!, {r4 - r9, lr}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 	mrc	p15, 0, r5, c15, c1, 0	@ CP access reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 	mrc	p15, 0, r6, c13, c0, 0	@ PID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 	mrc	p15, 0, r7, c3, c0, 0	@ domain ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	mrc	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	mrc	p15, 0, r9, c1, c0, 0	@ control reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 	bic	r4, r4, #2		@ clear frequency change bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	stmia	r0, {r4 - r9}		@ store cp regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 	ldmfd	sp!, {r4 - r9, pc}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) ENDPROC(cpu_xscale_do_suspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) ENTRY(cpu_xscale_do_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 	ldmia	r0, {r4 - r9}		@ load cp regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 	mov	ip, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 	mcr	p14, 0, r4, c6, c0, 0	@ clock configuration, turbo mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 	mcr	p15, 0, r5, c15, c1, 0	@ CP access reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 	mcr	p15, 0, r6, c13, c0, 0	@ PID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 	mcr	p15, 0, r7, c3, c0, 0	@ domain ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 	mcr	p15, 0, r1, c2, c0, 0	@ translation table base addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	mcr	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	mov	r0, r9			@ control register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	b	cpu_resume_mmu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) ENDPROC(cpu_xscale_do_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	.type	__xscale_setup, #function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) __xscale_setup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 	mov	r0, #1 << 6			@ cp6 for IOP3xx and Bulverde
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 	orr	r0, r0, #1 << 13		@ Its undefined whether this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 	adr	r5, xscale_crval
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 	ldmia	r5, {r5, r6}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 	mrc	p15, 0, r0, c1, c0, 0		@ get control register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 	bic	r0, r0, r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 	orr	r0, r0, r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 	.size	__xscale_setup, . - __xscale_setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 	 *  R
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 	 * .RVI ZFRS BLDP WCAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 	 * ..11 1.01 .... .101
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 	 * 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 	.type	xscale_crval, #object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) xscale_crval:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 	crval	clear=0x00003b07, mmuset=0x00003905, ucset=0x00001900
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	__INITDATA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 	define_processor_functions xscale, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	.section ".rodata"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 	string	cpu_arch_name, "armv5te"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 	string	cpu_elf_name, "v5"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 	string	cpu_80200_A0_A1_name, "XScale-80200 A0/A1"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 	string	cpu_80200_name, "XScale-80200"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 	string	cpu_80219_name, "XScale-80219"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 	string	cpu_8032x_name, "XScale-IOP8032x Family"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 	string	cpu_8033x_name, "XScale-IOP8033x Family"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 	string	cpu_pxa250_name, "XScale-PXA250"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 	string	cpu_pxa210_name, "XScale-PXA210"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 	string	cpu_ixp42x_name, "XScale-IXP42x Family"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 	string	cpu_ixp43x_name, "XScale-IXP43x Family"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 	string	cpu_ixp46x_name, "XScale-IXP46x Family"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 	string	cpu_ixp2400_name, "XScale-IXP2400"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 	string	cpu_ixp2800_name, "XScale-IXP2800"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 	string	cpu_pxa255_name, "XScale-PXA255"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	string	cpu_pxa270_name, "XScale-PXA270"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 	.align
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 	.section ".proc.info.init", "a"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) .macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	.type	__\name\()_proc_info,#object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) __\name\()_proc_info:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	.long	\cpu_val
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	.long	\cpu_mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 	.long	PMD_TYPE_SECT | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 		PMD_SECT_BUFFERABLE | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 		PMD_SECT_CACHEABLE | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 		PMD_SECT_AP_WRITE | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 		PMD_SECT_AP_READ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 	.long	PMD_TYPE_SECT | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 		PMD_SECT_AP_WRITE | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 		PMD_SECT_AP_READ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	initfn	__xscale_setup, __\name\()_proc_info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 	.long	cpu_arch_name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 	.long	cpu_elf_name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	.long	\cpu_name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 	.long	xscale_processor_functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	.long	v4wbi_tlb_fns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 	.long	xscale_mc_user_fns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 	.ifb \cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 		.long	xscale_cache_fns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 	.else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 		.long	\cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 	.endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 	.size	__\name\()_proc_info, . - __\name\()_proc_info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 	xscale_proc_info 80200_A0_A1, 0x69052000, 0xfffffffe, cpu_80200_name, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 		cache=xscale_80200_A0_A1_cache_fns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 	xscale_proc_info 80200, 0x69052000, 0xfffffff0, cpu_80200_name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 	xscale_proc_info 80219, 0x69052e20, 0xffffffe0, cpu_80219_name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 	xscale_proc_info 8032x, 0x69052420, 0xfffff7e0, cpu_8032x_name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 	xscale_proc_info 8033x, 0x69054010, 0xfffffd30, cpu_8033x_name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 	xscale_proc_info pxa250, 0x69052100, 0xfffff7f0, cpu_pxa250_name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 	xscale_proc_info pxa210, 0x69052120, 0xfffff3f0, cpu_pxa210_name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 	xscale_proc_info ixp2400, 0x69054190, 0xfffffff0, cpu_ixp2400_name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 	xscale_proc_info ixp2800, 0x690541a0, 0xfffffff0, cpu_ixp2800_name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 	xscale_proc_info ixp42x, 0x690541c0, 0xffffffc0, cpu_ixp42x_name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 	xscale_proc_info ixp43x, 0x69054040, 0xfffffff0, cpu_ixp43x_name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 	xscale_proc_info ixp46x, 0x69054200, 0xffffff00, cpu_ixp46x_name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 	xscale_proc_info pxa255, 0x69052d00, 0xfffffff0, cpu_pxa255_name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 	xscale_proc_info pxa270, 0x69054110, 0xfffffff0, cpu_pxa270_name