^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * linux/arch/arm/mm/cache-v7.S
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2001 Deep Blue Solutions Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 2005 ARM Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * This is the "shell" of the ARMv7 processor support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/linkage.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <asm/assembler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <asm/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <asm/unwind.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <asm/hardware/cache-b15-rac.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include "proc-macros.S"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
@ icache_size: worst-case I-cache line size (bytes) used by
@ v7_coherent_user_range when the mismatched-I-cache-geometry
@ workaround is enabled.  Defaults to 64; presumably refined during
@ CPU setup once the real geometry is known -- TODO confirm against
@ the C side that writes this.
.globl icache_size
	.data
	.align	2
icache_size:
	.long	64
	.text
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * The secondary kernel init calls v7_flush_dcache_all before it enables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * the L1; however, the L1 comes out of reset in an undefined state, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * the clean + invalidate performed by v7_flush_dcache_all causes a bunch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * of cache lines with uninitialized data and uninitialized tags to get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * written out to memory, which does really unpleasant things to the main
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * processor. We fix this by performing an invalidate, rather than a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * clean + invalidate, before jumping into the kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * This function is cloned from arch/arm/mach-tegra/headsmp.S, and needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * to be called for both secondary cores startup and primary core resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * procedures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) */
@ void v7_invalidate_l1(void)
@
@ Invalidate (without cleaning) the L1 data cache by set/way.
@ Clobbers r0-r6 and flags; uses no stack, so it is safe before the
@ MMU/caches are enabled (see the comment block above).
ENTRY(v7_invalidate_l1)
	mov	r0, #0
	mcr	p15, 2, r0, c0, c0, 0	@ write CSSELR: select L1 data/unified cache
	mrc	p15, 1, r0, c0, c0, 0	@ read CCSIDR for the selected cache

	movw	r1, #0x7fff
	and	r2, r1, r0, lsr #13	@ r2 = NumSets - 1 (CCSIDR[27:13])

	movw	r1, #0x3ff

	and	r3, r1, r0, lsr #3	@ NumWays - 1 (CCSIDR[12:3])
	add	r2, r2, #1		@ NumSets

	and	r0, r0, #0x7		@ LineSize field (CCSIDR[2:0])
	add	r0, r0, #4		@ SetShift = log2(bytes per line)

	clz	r1, r3			@ WayShift (way index goes in the top bits)
	add	r4, r3, #1		@ NumWays
1:	sub	r2, r2, #1		@ NumSets--
	mov	r3, r4			@ Temp = NumWays
2:	subs	r3, r3, #1		@ Temp--
	mov	r5, r3, lsl r1
	mov	r6, r2, lsl r0
	orr	r5, r5, r6		@ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
	mcr	p15, 0, r5, c7, c6, 2	@ DCISW: invalidate by set/way (leaves flags intact)
	bgt	2b			@ flags are still from the subs above
	cmp	r2, #0
	bgt	1b
	dsb	st			@ wait for the invalidates to complete
	isb
	ret	lr
ENDPROC(v7_invalidate_l1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * v7_flush_icache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * Flush the whole I-cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) * Registers:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * r0 - set to 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) */
ENTRY(v7_flush_icache_all)
	mov	r0, #0
	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ ICIALLUIS: invalidate I-cache inner shareable
	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ ICIALLU: I+BTB cache invalidate
	ret	lr				@ r0 left at 0, per the header above
ENDPROC(v7_flush_icache_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * v7_flush_dcache_louis()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * Flush the D-cache up to the Level of Unification Inner Shareable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
ENTRY(v7_flush_dcache_louis)
	dmb					@ ensure ordering with previous memory accesses
	mrc	p15, 1, r0, c0, c0, 1		@ read clidr, r0 = clidr
	ALT_SMP(mov	r3, r0, lsr #20)	@ move LoUIS into position
	ALT_UP(	mov	r3, r0, lsr #26)	@ move LoUU into position
	ands	r3, r3, #7 << 1			@ extract LoU*2 field from clidr
	@ tail into v7_flush_dcache_all's loop with r0 = clidr and
	@ r3 = (level limit)*2, the registers that loop expects
	bne	start_flush_levels		@ LoU != 0, start flushing
#ifdef CONFIG_ARM_ERRATA_643719
	@ Erratum 643719: Cortex-A9 r0p? reports LoUIS as 0 even though
	@ L1 must still be flushed; detect that core and force LoUIS = 1.
	ALT_SMP(mrc	p15, 0, r2, c0, c0, 0)	@ read main ID register
	ALT_UP(	ret	lr)			@ LoUU is zero, so nothing to do
	movw	r1, #:lower16:(0x410fc090 >> 4)	@ ID of ARM Cortex A9 r0p?
	movt	r1, #:upper16:(0x410fc090 >> 4)
	teq	r1, r2, lsr #4			@ test for errata affected core and if so...
	moveq	r3, #1 << 1			@ fix LoUIS value
	beq	start_flush_levels		@ start flushing cache levels
#endif
	ret	lr
ENDPROC(v7_flush_dcache_louis)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * v7_flush_dcache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) * Flush the whole D-cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) *
 * Takes no arguments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) */
ENTRY(v7_flush_dcache_all)
	dmb					@ ensure ordering with previous memory accesses
	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
	mov	r3, r0, lsr #23			@ move LoC into position
	ands	r3, r3, #7 << 1			@ extract LoC*2 from clidr
	beq	finished			@ if loc is 0, then no need to clean
@ start_flush_levels: also entered from v7_flush_dcache_louis with
@ r0 = clidr and r3 = (level limit)*2 already set up.
start_flush_levels:
	mov	r10, #0				@ start clean at cache level 0 (r10 = level*2)
flush_levels:
	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
	and	r1, r1, #7			@ mask of the bits for current cache only
	cmp	r1, #2				@ see what cache we have at this level
	blt	skip				@ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPTION
	save_and_disable_irqs_notrace r9	@ make cssr&csidr read atomic
#endif
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	isb					@ isb to synchronize the new cssr&csidr
	mrc	p15, 1, r1, c0, c0, 0		@ read the new csidr
#ifdef CONFIG_PREEMPTION
	restore_irqs_notrace r9
#endif
	and	r2, r1, #7			@ extract the length of the cache lines
	add	r2, r2, #4			@ add 4 (line length offset)
	movw	r4, #0x3ff
	ands	r4, r4, r1, lsr #3		@ find maximum number on the way size
	clz	r5, r4				@ find bit position of way size increment
	movw	r7, #0x7fff
	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
loop1:
	mov	r9, r7				@ create working copy of max index
loop2:
 ARM(	orr	r11, r10, r4, lsl r5	)	@ factor way and cache number into r11
 THUMB(	lsl	r6, r4, r5		)
 THUMB(	orr	r11, r10, r6		)	@ factor way and cache number into r11
 ARM(	orr	r11, r11, r9, lsl r2	)	@ factor index number into r11
 THUMB(	lsl	r6, r9, r2		)
 THUMB(	orr	r11, r11, r6		)	@ factor index number into r11
	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
	subs	r9, r9, #1			@ decrement the index
	bge	loop2
	subs	r4, r4, #1			@ decrement the way
	bge	loop1
skip:
	add	r10, r10, #2			@ increment cache number
	cmp	r3, r10
#ifdef CONFIG_ARM_ERRATA_814220
	dsb					@ erratum 814220: barrier between level flushes
#endif
	bgt	flush_levels
finished:
	mov	r10, #0				@ switch back to cache level 0
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	dsb	st
	isb
	ret	lr
ENDPROC(v7_flush_dcache_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) /*
 * v7_flush_kern_cache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) * Flush the entire cache system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) * The data cache flush is now achieved using atomic clean / invalidates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) * working outwards from L1 cache. This is done using Set/Way based cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) * maintenance instructions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) * The instruction cache can still be invalidated back to the point of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) * unification in a single instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) */
@ void v7_flush_kern_cache_all(void)
@
@ Clean+invalidate the entire D-cache to LoC, then invalidate the
@ I-cache/BTB.  Saves the callee-saved registers that
@ v7_flush_dcache_all corrupts (see its header comment).
ENTRY(v7_flush_kern_cache_all)
 ARM(	stmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
 THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
	bl	v7_flush_dcache_all
	mov	r0, #0
	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
 ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
 THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
	ret	lr
ENDPROC(v7_flush_kern_cache_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) * v7_flush_kern_cache_louis(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) * Flush the data cache up to Level of Unification Inner Shareable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * Invalidate the I-cache to the point of unification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) */
@ void v7_flush_kern_cache_louis(void)
@
@ Same shape as v7_flush_kern_cache_all, but the D-cache flush stops
@ at the Level of Unification Inner Shareable instead of LoC.
ENTRY(v7_flush_kern_cache_louis)
 ARM(	stmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
 THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
	bl	v7_flush_dcache_louis
	mov	r0, #0
	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
 ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
 THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
	ret	lr
ENDPROC(v7_flush_kern_cache_louis)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) /*
 * v7_flush_user_cache_all()
 *
 * Flush all cache entries in a particular address space (a no-op
 * here: ARMv7 has VIPT caches, see v7_flush_user_cache_range below)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) * - mm - mm_struct describing address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) */
ENTRY(v7_flush_user_cache_all)
	/*FALLTHROUGH*/

/*
 * v7_flush_user_cache_range(start, end, flags)
 *
 * Flush a range of cache entries in the specified address space.
 *
 * - start - start address (may not be aligned)
 * - end   - end address (exclusive, may not be aligned)
 * - flags - vm_area_struct flags describing address space
 *
 * It is assumed that:
 * - we have a VIPT cache.
 *
 * With a VIPT cache there is no per-address-space aliasing to
 * resolve, so both entry points are deliberate no-ops.
 */
ENTRY(v7_flush_user_cache_range)
	ret	lr
ENDPROC(v7_flush_user_cache_all)
ENDPROC(v7_flush_user_cache_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) * v7_coherent_kern_range(start,end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) * Ensure that the I and D caches are coherent within specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) * region. This is typically used when code has been written to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) * a memory region, and will be executed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) * - start - virtual start address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) * - end - virtual end address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) * It is assumed that:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) * - the Icache does not read data from the write buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) */
ENTRY(v7_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * v7_coherent_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within specified
 * region.  This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 *
 * Returns 0 in r0 on success, -EFAULT if a USER() access below
 * faults (see the 9001 fixup handler).
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
ENTRY(v7_coherent_user_range)
 UNWIND(.fnstart		)
	dcache_line_size r2, r3			@ r2 = D-cache line size (r3 scratch)
	sub	r3, r2, #1			@ r3 = line mask
	bic	r12, r0, r3			@ r12 = start rounded down to a line
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))				@ erratum 764369: barrier before maintenance op
	ALT_UP(W(nop))
#endif
1:
 USER(	mcr	p15, 0, r12, c7, c11, 1	)	@ clean D line to the point of unification
	add	r12, r12, r2
	cmp	r12, r1
	blo	1b
	dsb	ishst				@ cleans visible before I-side maintenance
#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
	ldr	r3, =icache_size
	ldr	r2, [r3, #0]			@ use system-wide worst-case I line size
#else
	icache_line_size r2, r3			@ r2 = I-cache line size
#endif
	sub	r3, r2, #1			@ recompute mask for the I-cache stride
	bic	r12, r0, r3
2:
 USER(	mcr	p15, 0, r12, c7, c5, 1	)	@ invalidate I line
	add	r12, r12, r2
	cmp	r12, r1
	blo	2b
	mov	r0, #0				@ success return value
	ALT_SMP(mcr	p15, 0, r0, c7, c1, 6)	@ invalidate BTB Inner Shareable
	ALT_UP(mcr	p15, 0, r0, c7, c5, 6)	@ invalidate BTB
	dsb	ishst
	isb
	ret	lr

/*
 * Fault handling for the cache operation above.  If the virtual address in r0
 * isn't mapped, fail with -EFAULT.  Reached via the exception fixup table
 * entries emitted by the USER() annotations above.
 */
9001:
#ifdef CONFIG_ARM_ERRATA_775420
	dsb					@ erratum 775420: barrier after aborted maintenance op
#endif
	mov	r0, #-EFAULT
	ret	lr
 UNWIND(.fnend		)
ENDPROC(v7_coherent_kern_range)
ENDPROC(v7_coherent_user_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) * v7_flush_kern_dcache_area(void *addr, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) * Ensure that the data held in the page kaddr is written back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) * to the page in question.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) * - addr - kernel address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) * - size - region size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) */
ENTRY(v7_flush_kern_dcache_area)
	dcache_line_size r2, r3			@ r2 = D-cache line size (r3 scratch)
	add	r1, r0, r1			@ r1 = end = addr + size
	sub	r3, r2, #1			@ line mask
	bic	r0, r0, r3			@ round start down to a cache line
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))				@ erratum 764369: barrier before maintenance op
	ALT_UP(W(nop))
#endif
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line / unified line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st				@ wait for completion before returning
	ret	lr
ENDPROC(v7_flush_kern_dcache_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) * v7_dma_inv_range(start,end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) * Invalidate the data cache within the specified region; we will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) * be performing a DMA operation in this region and we want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) * purge old data in the cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) * - start - virtual start address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) * - end - virtual end address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) */
@ v7_dma_inv_range(start, end) -- local helper, r0 = start, r1 = end.
@
@ A partial cache line at either boundary is cleaned+invalidated
@ instead of just invalidated, so unrelated data sharing that line
@ is not destroyed.
v7_dma_inv_range:
	dcache_line_size r2, r3			@ r2 = line size (r3 scratch)
	sub	r3, r2, #1			@ line mask
	tst	r0, r3				@ start mis-aligned to a line?
	bic	r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))				@ erratum 764369 (barriers leave flags intact)
	ALT_UP(W(nop))
#endif
	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
	addne	r0, r0, r2			@ skip the partial leading line

	tst	r1, r3				@ end mis-aligned to a line?
	bic	r1, r1, r3
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D / U line
	cmp	r0, r1
1:
	mcrlo	p15, 0, r0, c7, c6, 1		@ invalidate D / U line
	addlo	r0, r0, r2
	cmplo	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7_dma_inv_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) * v7_dma_clean_range(start,end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) * - start - virtual start address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) * - end - virtual end address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) */
@ v7_dma_clean_range(start, end) -- local helper, r0 = start, r1 = end.
@ Clean (write back, no invalidate) each D-cache line in the range.
v7_dma_clean_range:
	dcache_line_size r2, r3			@ r2 = line size (r3 scratch)
	sub	r3, r2, #1			@ line mask
	bic	r0, r0, r3			@ round start down to a cache line
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))				@ erratum 764369: barrier before maintenance op
	ALT_UP(W(nop))
#endif
1:
	mcr	p15, 0, r0, c7, c10, 1		@ clean D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st				@ wait for the cleans to complete
	ret	lr
ENDPROC(v7_dma_clean_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) * v7_dma_flush_range(start,end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) * - start - virtual start address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) * - end - virtual end address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) */
ENTRY(v7_dma_flush_range)
	dcache_line_size r2, r3			@ r2 = line size (r3 scratch)
	sub	r3, r2, #1			@ line mask
	bic	r0, r0, r3			@ round start down to a cache line
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))				@ erratum 764369: barrier before maintenance op
	ALT_UP(W(nop))
#endif
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st				@ wait for completion before returning
	ret	lr
ENDPROC(v7_dma_flush_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) * dma_map_area(start, size, dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) * - start - kernel virtual start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) * - size - size of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) * - dir - DMA direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) */
ENTRY(v7_dma_map_area)
	add	r1, r1, r0			@ r1 = end = start + size
	teq	r2, #DMA_FROM_DEVICE
	beq	v7_dma_inv_range		@ tail-call: device writes, invalidate
	b	v7_dma_clean_range		@ otherwise clean so device sees our data
ENDPROC(v7_dma_map_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) * dma_unmap_area(start, size, dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) * - start - kernel virtual start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) * - size - size of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) * - dir - DMA direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) */
ENTRY(v7_dma_unmap_area)
	add	r1, r1, r0			@ r1 = end = start + size
	teq	r2, #DMA_TO_DEVICE
	bne	v7_dma_inv_range		@ tail-call: discard lines filled during DMA
	ret	lr				@ DMA_TO_DEVICE: nothing to do on unmap
ENDPROC(v7_dma_unmap_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v7

	/* The Broadcom Brahma-B15 read-ahead cache requires some modifications
	 * to the v7_cache_fns, we only override the ones we need
	 */
#ifndef CONFIG_CACHE_B15_RAC
	@ Alias this entry only when the B15 RAC support is not built in;
	@ presumably the RAC code supplies its own b15_flush_kern_cache_all
	@ (see <asm/hardware/cache-b15-rac.h>) -- TODO confirm.
	globl_equ	b15_flush_kern_cache_all,	v7_flush_kern_cache_all
#endif
	@ All remaining b15_* entry points are straight aliases of the v7 ones.
	globl_equ	b15_flush_icache_all,		v7_flush_icache_all
	globl_equ	b15_flush_kern_cache_louis,	v7_flush_kern_cache_louis
	globl_equ	b15_flush_user_cache_all,	v7_flush_user_cache_all
	globl_equ	b15_flush_user_cache_range,	v7_flush_user_cache_range
	globl_equ	b15_coherent_kern_range,	v7_coherent_kern_range
	globl_equ	b15_coherent_user_range,	v7_coherent_user_range
	globl_equ	b15_flush_kern_dcache_area,	v7_flush_kern_dcache_area

	globl_equ	b15_dma_map_area,		v7_dma_map_area
	globl_equ	b15_dma_unmap_area,		v7_dma_unmap_area
	globl_equ	b15_dma_flush_range,		v7_dma_flush_range

	@ define the struct cpu_cache_fns for the b15 aliases as well
	define_cache_functions b15