/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/mm/proc-xsc3.S
 *
 * Original Author: Matthew Gilbert
 * Current Maintainer: Lennert Buytenhek <buytenh@wantstofly.org>
 *
 * Copyright 2004 (C) Intel Corp.
 * Copyright 2005 (C) MontaVista Software, Inc.
 *
 * MMU functions for the Intel XScale3 Core (XSC3).  The XSC3 core is
 * an extension to Intel's original XScale core that adds the following
 * features:
 *
 * - ARMv6 Supersections
 * - Low Locality Reference pages (replaces mini-cache)
 * - 36-bit addressing
 * - L2 cache
 * - Cache coherency if chipset supports it
 *
 * Based on original XScale code by Nicolas Pitre.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE	32768
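/*
 * (For instance, a 64 KiB range would take 2048 per-line operations at
 * 32 bytes per line, so cleaning the entire 32 KiB D cache is cheaper.)
 */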

/*
 * The cache line size of the L1 I, L1 D and unified L2 cache.
 */
#define CACHELINESIZE	32

/*
 * The size of the L1 D cache.
 */
#define CACHESIZE	32768

/*
 * This macro waits for a CP15 write to complete.  It is needed when we
 * have to ensure that the last operation on the coprocessor has
 * completed before continuing.
 */
	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm
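
/*
 * For example, cpu_xsc3_switch_mm below returns with "cpwait_ret lr, ip".
 * \rd, LSR #32 is always zero, so the sub is effectively a return to
 * \lr, but its data dependency on \rd keeps the branch from completing
 * until the mrc has.
 */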

/*
 * This macro cleans and invalidates the entire L1 D cache.
 */

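/*
 * The loop below walks the D cache by set/way: \rd starts at line
 * index 0x1fe0 (the highest set, at 32 bytes per line), each add of
 * 0x40000000 steps the way field in bits [31:30] until the carry
 * sets, and the subtract of 0x20 then moves down one set until the
 * index goes negative.  (A rough sketch of the arithmetic behind the
 * c7, c14, 2 operation used here.)
 */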
	.macro	clean_d_cache	rd, rs
	mov	\rd, #0x1f00
	orr	\rd, \rd, #0x00e0
1:	mcr	p15, 0, \rd, c7, c14, 2		@ clean/invalidate L1 D line
	adds	\rd, \rd, #0x40000000
	bcc	1b
	subs	\rd, \rd, #0x20
	bpl	1b
	.endm

	.text

/*
 * cpu_xsc3_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xsc3_proc_init)
	ret	lr

/*
 * cpu_xsc3_proc_fin()
 */
ENTRY(cpu_xsc3_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_xsc3_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_xsc3_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	bic	r1, r1, #0x0086			@ ........B....CA.
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate L1 caches and BTB
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point.  We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	ret	r0
ENDPROC(cpu_xsc3_reset)
	.popsection

/*
 * cpu_xsc3_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5

ENTRY(cpu_xsc3_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ go to idle
	ret	lr

/* ================================= CACHE ================================ */

/*
 * flush_icache_all()
 *
 * Unconditionally invalidate the entire icache.
 */
ENTRY(xsc3_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(xsc3_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(xsc3_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(xsc3_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	ret	lr

/*
 * flush_user_cache_range(start, end, vm_flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (may not be aligned)
 * - end - end address (exclusive, may not be aligned)
 * - vm_flags - vm_flags of the vm_area_struct describing the address space
 */
	.align	5
ENTRY(xsc3_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate L1 I line
	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	ret	lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the I cache and the D cache in the
 * region described by start and end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * Note: single I-cache line invalidation isn't used here since
 * it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xsc3_coherent_kern_range)
/* FALLTHROUGH */
ENTRY(xsc3_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
	ret	lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(xsc3_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
	ret	lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
xsc3_dma_inv_range:
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean L1 D line
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	ret	lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
xsc3_dma_clean_range:
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	ret	lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(xsc3_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	ret	lr

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
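/*
 * Dispatch note: with DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1 and
 * DMA_FROM_DEVICE = 2, beq sends DMA_TO_DEVICE to the clean routine,
 * bcs sends DMA_FROM_DEVICE to the invalidate routine, and
 * DMA_BIDIRECTIONAL falls through to the clean+invalidate flush.
 */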
ENTRY(xsc3_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	xsc3_dma_clean_range
	bcs	xsc3_dma_inv_range
	b	xsc3_dma_flush_range
ENDPROC(xsc3_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(xsc3_dma_unmap_area)
	ret	lr
ENDPROC(xsc3_dma_unmap_area)

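/*
 * XSC3 is a uniprocessor core, so flushing to the Level of Unification
 * Inner Shareable is taken to be the same as flushing everything.
 */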
	.globl	xsc3_flush_kern_cache_louis
	.equ	xsc3_flush_kern_cache_louis, xsc3_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions xsc3

ENTRY(cpu_xsc3_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	ret	lr

/* =============================== PageTable ============================== */

/*
 * cpu_xsc3_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xsc3_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcr	p15, 0, ip, c7, c5, 4		@ prefetch flush
	orr	r0, r0, #0x18			@ cache the page table in L2
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xsc3_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
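/*
 * Memory-type table for cpu_xsc3_set_pte_ext below.  The L_PTE_MT_*
 * field (masked with L_PTE_MT_MASK) is already a multiple of 4, so it
 * is used directly as a byte offset into this table of words.
 */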
cpu_xsc3_mt_table:
	.long	0x00						@ L_PTE_MT_UNCACHED
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_BUFFERABLE
	.long	PTE_EXT_TEX(5) | PTE_CACHEABLE			@ L_PTE_MT_WRITETHROUGH
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
	.long	PTE_EXT_TEX(1) | PTE_BUFFERABLE			@ L_PTE_MT_DEV_SHARED
	.long	0x00						@ unused
	.long	0x00						@ L_PTE_MT_MINICACHE (not present)
	.long	PTE_EXT_TEX(5) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC (not present?)
	.long	0x00						@ unused
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_DEV_WC
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
	.long	PTE_EXT_TEX(2)					@ L_PTE_MT_DEV_NONSHARED
	.long	0x00						@ unused
	.long	0x00						@ unused
	.long	0x00						@ unused

	.align	5
ENTRY(cpu_xsc3_set_pte_ext)
	xscale_set_pte_ext_prologue

	tst	r1, #L_PTE_SHARED		@ shared?
	and	r1, r1, #L_PTE_MT_MASK
	adr	ip, cpu_xsc3_mt_table
	ldr	ip, [ip, r1]
	orrne	r2, r2, #PTE_EXT_COHERENT	@ interlock: mask in coherent bit
	bic	r2, r2, #0x0c			@ clear old C,B bits
	orr	r2, r2, ip

	xscale_set_pte_ext_epilogue
	ret	lr

	.ltorg
	.align

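/*
 * Six words of suspend state, matching cpu_xsc3_do_suspend below:
 * clock configuration, CP access, PID, domain ID, auxiliary control
 * and control register.
 */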
	.globl	cpu_xsc3_suspend_size
	.equ	cpu_xsc3_suspend_size, 4 * 6
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xsc3_do_suspend)
	stmfd	sp!, {r4 - r9, lr}
	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
	mrc	p15, 0, r5, c15, c1, 0	@ CP access reg
	mrc	p15, 0, r6, c13, c0, 0	@ PID
	mrc	p15, 0, r7, c3, c0, 0	@ domain ID
	mrc	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0	@ control reg
	bic	r4, r4, #2		@ clear frequency change bit
	stmia	r0, {r4 - r9}		@ store cp regs
	ldmia	sp!, {r4 - r9, pc}
ENDPROC(cpu_xsc3_do_suspend)

ENTRY(cpu_xsc3_do_resume)
	ldmia	r0, {r4 - r9}		@ load cp regs
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
	mcr	p15, 0, ip, c7, c10, 4	@ drain write (&fill) buffer
	mcr	p15, 0, ip, c7, c5, 4	@ flush prefetch buffer
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
	mcr	p14, 0, r4, c6, c0, 0	@ clock configuration, turbo mode.
	mcr	p15, 0, r5, c15, c1, 0	@ CP access reg
	mcr	p15, 0, r6, c13, c0, 0	@ PID
	mcr	p15, 0, r7, c3, c0, 0	@ domain ID
	orr	r1, r1, #0x18		@ cache the page table in L2
	mcr	p15, 0, r1, c2, c0, 0	@ translation table base addr
	mcr	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mov	r0, r9			@ control register
	b	cpu_resume_mmu
ENDPROC(cpu_xsc3_do_resume)
#endif

	.type	__xsc3_setup, #function
__xsc3_setup:
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate L1 caches and BTB
	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcr	p15, 0, ip, c7, c5, 4		@ prefetch flush
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	orr	r4, r4, #0x18			@ cache the page table in L2
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer

	mov	r0, #1 << 6			@ cp6 access for early sched_clock
	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register

	mrc	p15, 0, r0, c1, c0, 1		@ get auxiliary control reg
	and	r0, r0, #2			@ preserve the P bit setting
	orr	r0, r0, #(1 << 10)		@ enable L2 for LLR cache
	mcr	p15, 0, r0, c1, c0, 1		@ set auxiliary control reg

	adr	r5, xsc3_crval
	ldmia	r5, {r5, r6}

#ifdef CONFIG_CACHE_XSC3L2
	mrc	p15, 1, r0, c0, c0, 1		@ get L2 present information
	ands	r0, r0, #0xf8
	orrne	r6, r6, #(1 << 26)		@ enable L2 if present
#endif

	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5			@ ..V. ..R. .... ..A.
	orr	r0, r0, r6			@ ..VI Z..S .... .C.M (mmu)
						@ ...I Z..S .... .... (uc)
	ret	lr

	.size	__xsc3_setup, . - __xsc3_setup

	.type	xsc3_crval, #object
xsc3_crval:
	crval	clear=0x04002202, mmuset=0x00003905, ucset=0x00001900
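/*
 * crval (see proc-macros.S): "clear" holds the control register bits
 * cleared by __xsc3_setup, "mmuset" the bits then set when running
 * with the MMU enabled, and "ucset" the bits set for the MMU-off
 * (uncached) configuration.
 */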

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions xsc3, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_xsc3_name, "XScale-V3 based processor"

	.align

	.section ".proc.info.init", "a"

.macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req
	.type	__\name\()_proc_info,#object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__xsc3_setup, __\name\()_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_xsc3_name
	.long	xsc3_processor_functions
	.long	v4wbi_tlb_fns
	.long	xsc3_mc_user_fns
	.long	xsc3_cache_fns
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm

	xsc3_proc_info xsc3, 0x69056000, 0xffffe000

/* Note: PXA935 changed its implementor ID from Intel to Marvell */
	xsc3_proc_info xsc3_pxa935, 0x56056000, 0xffffe000