/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v6.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 *  This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>

#include "proc-macros.S"

#define HARVARD_CACHE
#define CACHE_LINE_SIZE		32
#define D_CACHE_LINE_SIZE	32
#define BTB_FLUSH_SIZE		8

/*
 *	v6_flush_icache_all()
 *
 *	Flush the whole I-cache.
 *
 *	ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail.
 *	This erratum is present in 1136, 1156 and 1176. It does not affect the
 *	MPCore.
 *
 *	Registers:
 *	r0 - set to 0
 *	r1 - corrupted
 */
ENTRY(v6_flush_icache_all)
	mov	r0, #0
#ifdef CONFIG_ARM_ERRATA_411920
	mrs	r1, cpsr
	cpsid	ifa				@ disable interrupts
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	msr	cpsr_cx, r1			@ restore interrupts
	.rept	11				@ ARM Ltd recommends at least
	nop					@ 11 NOPs
	.endr
#else
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I-cache
#endif
	ret	lr
ENDPROC(v6_flush_icache_all)
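
/*
 * Usage sketch (an assumption about callers, not something defined in
 * this file): this entry point is normally reached through the
 * cpu_cache_fns table emitted at the end of this file, e.g. via
 * __cpuc_flush_icache_all() from <asm/cacheflush.h>.  The erratum 411920
 * path masks IRQs, FIQs and imprecise aborts, issues the "invalidate
 * entire I-cache" operation four times and pads with 11 NOPs, which is
 * the workaround applied here for that erratum.
 */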

/*
 *	v6_flush_kern_cache_all()
 *
 *	Flush the entire cache.
 */
ENTRY(v6_flush_kern_cache_all)
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_flush_icache_all
#endif
#else
	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
#endif
	ret	lr
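
/*
 * Usage sketch (an assumption about callers): once define_cache_functions
 * below has built the v6 cpu_cache_fns table, the generic
 * flush_cache_all()/__cpuc_flush_kern_all() glue in <asm/cacheflush.h>
 * is expected to resolve to this entry point.
 */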

/*
 *	v6_flush_user_cache_all(mm)
 *
 *	Flush all cache entries in a particular address space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v6_flush_user_cache_all)
	/*FALLTHROUGH*/

/*
 *	v6_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start	- start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache.
 */
ENTRY(v6_flush_user_cache_range)
	ret	lr
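
/*
 * Both entries above are deliberate no-ops: on the VIPT cache assumed
 * here, no cache maintenance is needed when flushing user address
 * spaces or ranges, so v6_flush_user_cache_all simply falls through to
 * v6_flush_user_cache_range, which returns immediately.
 */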

/*
 *	v6_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	v6_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_user_range)
 UNWIND(.fnstart		)
#ifdef HARVARD_CACHE
	bic	r0, r0, #CACHE_LINE_SIZE - 1
1:
 USER(	mcr	p15, 0, r0, c7, c10, 1	)	@ clean D line
	add	r0, r0, #CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
#endif
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_flush_icache_all
#endif
#else
	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
#endif
	ret	lr

/*
 * Fault handling for the cache operation above.  If the virtual address in r0
 * isn't mapped, fail with -EFAULT.
 */
9001:
	mov	r0, #-EFAULT
	ret	lr
 UNWIND(.fnend		)
ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)
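
/*
 * Usage sketch (an assumption about callers, shown as pseudo-C): helpers
 * such as flush_icache_range() are expected to land here through the
 * cpu_cache_fns table after new instructions have been written, roughly:
 *
 *	memcpy(dst, insns, len);			// write new code
 *	flush_icache_range((unsigned long)dst,
 *			   (unsigned long)dst + len);	// D-clean + I-invalidate
 *
 * The USER() wrapping of the D-cache clean lets the user variant fault
 * on an unmapped user address and return -EFAULT through the 9001
 * handler instead of oopsing.
 */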

/*
 *	v6_flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the region described by addr and
 *	size is written back to memory.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v6_flush_kern_dcache_area)
	add	r1, r0, r1
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
#ifdef HARVARD_CACHE
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#endif
	ret	lr

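/*
 * Note on CONFIG_DMA_CACHE_RWFO in the DMA routines below (a sketch of
 * the rationale, based on how the code reads): ARMv6 SMP cores do not
 * broadcast cache maintenance operations to other CPUs, so a plain
 * clean/invalidate only affects the local D-cache.  The "read for
 * ownership"/"write for ownership" loads and stores pull the affected
 * line into the executing CPU's cache in the appropriate state first,
 * so that the line operation that follows acts on the valid copy.
 */
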
/*
 *	v6_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
v6_dma_inv_range:
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrb	r2, [r0]			@ read for ownership
	strb	r2, [r0]			@ write for ownership
#endif
	tst	r0, #D_CACHE_LINE_SIZE - 1
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	tst	r1, #D_CACHE_LINE_SIZE - 1
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrbne	r2, [r1, #-1]			@ read for ownership
	strbne	r2, [r1, #-1]			@ write for ownership
#endif
	bic	r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line
#else
	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
#endif
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
#else
	mcr	p15, 0, r0, c7, c7, 1		@ invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrlo	r2, [r0]			@ read for ownership
	strlo	r2, [r0]			@ write for ownership
#endif
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	v6_dma_clean_range(start,end)
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
v6_dma_clean_range:
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef CONFIG_DMA_CACHE_RWFO
	ldr	r2, [r0]			@ read for ownership
#endif
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcr	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	v6_dma_flush_range(start,end)
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
ENTRY(v6_dma_flush_range)
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrb	r2, [r0]			@ read for ownership
	strb	r2, [r0]			@ write for ownership
#endif
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrblo	r2, [r0]			@ read for ownership
	strblo	r2, [r0]			@ write for ownership
#endif
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v6_dma_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_FROM_DEVICE
	beq	v6_dma_inv_range
#ifndef CONFIG_DMA_CACHE_RWFO
	b	v6_dma_clean_range
#else
	teq	r2, #DMA_TO_DEVICE
	beq	v6_dma_clean_range
	b	v6_dma_flush_range
#endif
ENDPROC(v6_dma_map_area)
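
/*
 * Dispatch summary for v6_dma_map_area (illustrative pseudo-C of the
 * branches above, not code that exists elsewhere):
 *
 *	if (dir == DMA_FROM_DEVICE)
 *		v6_dma_inv_range(start, start + size);
 *	else if (!IS_ENABLED(CONFIG_DMA_CACHE_RWFO) || dir == DMA_TO_DEVICE)
 *		v6_dma_clean_range(start, start + size);
 *	else
 *		v6_dma_flush_range(start, start + size);
 */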

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v6_dma_unmap_area)
#ifndef CONFIG_DMA_CACHE_RWFO
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v6_dma_inv_range
#endif
	ret	lr
ENDPROC(v6_dma_unmap_area)
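
/*
 * Note (rationale as commonly understood, not stated in this file): on
 * unmap only the DMA_FROM_DEVICE/DMA_BIDIRECTIONAL cases invalidate the
 * range, typically to discard lines that may have been allocated while
 * the device owned the buffer; DMA_TO_DEVICE needs nothing.  With
 * CONFIG_DMA_CACHE_RWFO the routine is a plain return, since all
 * maintenance is performed at map time.
 */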

	.globl	v6_flush_kern_cache_louis
	.equ	v6_flush_kern_cache_louis, v6_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v6
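
/*
 * Descriptive note (based on proc-macros.S; the wiring is an assumption
 * about the rest of the kernel, not defined here): define_cache_functions
 * is expected to emit a struct cpu_cache_fns instance (v6_cache_fns)
 * whose members point at the v6_* entry points above, placed in init
 * data for the processor setup code to pick up.  The "louis" (Level Of
 * Unification Inner Shareable) flush is aliased to the full flush here
 * because ARMv6 implementations have a single cache level, so the two
 * operations are equivalent.
 */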