/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 *  ARMv4 write through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.  Note that this equals
 * the total data cache size: CACHE_DSEGMENTS * CACHE_DENTRIES
 * * CACHE_DLINESIZE = 8 * 64 * 32 = 16384 bytes.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wt_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(v4wt_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wt_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC			@ force the I cache invalidate below
	mov	ip, #0
__flush_whole_cache:
	tst	r2, #VM_EXEC			@ executable region?
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	ret	lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache		@ range too big: flush it all

1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr

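@ C-level view of the entry point above, as wired up through struct
@ cpu_cache_fns (a sketch; see <asm/cacheflush.h> for the authoritative
@ prototype).  r0 = start, r1 = end, r2 = vma flags:
@
@	void v4wt_flush_user_cache_range(unsigned long start,
@					 unsigned long end,
@					 unsigned int flags);
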
/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
ENTRY(v4wt_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
ENTRY(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0				@ return 0 (success)
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wt_flush_kern_dcache_area)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, r1			@ end = addr + size
	/* fallthrough */

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
v4wt_dma_inv_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *	On a write-through cache no line is ever dirty, so cleaning
 *	is a no-op and flushing reduces to invalidating; hence the
 *	alias below.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
	.globl	v4wt_dma_flush_range
	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range

/*
 *	dma_unmap_area(start, size, dir)
 *	- start - kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 *
 *	For DMA_FROM_DEVICE and DMA_BIDIRECTIONAL, invalidate the range
 *	so the CPU sees the data written by the device.  DMA_TO_DEVICE
 *	needs nothing: fall through to the return in dma_map_area.
 */
ENTRY(v4wt_dma_unmap_area)
	add	r1, r1, r0			@ end = start + size
	teq	r2, #DMA_TO_DEVICE
	bne	v4wt_dma_inv_range		@ !TO_DEVICE: discard stale lines
	/* FALLTHROUGH */

/*
 *	dma_map_area(start, size, dir)
 *	- start - kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 *
 *	A no-op here: a write-through cache never holds dirty data,
 *	so memory is already up to date before the DMA starts.
 */
ENTRY(v4wt_dma_map_area)
	ret	lr
ENDPROC(v4wt_dma_unmap_area)
ENDPROC(v4wt_dma_map_area)

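@ "louis" means flush to the Level of Unification Inner Shareable.  With
@ a single cache level there is no distinction, so alias it to the full
@ cache flush.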
	.globl	v4wt_flush_kern_cache_louis
	.equ	v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4wt
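
@ For reference, define_cache_functions is expected to expand to roughly
@ the following function-pointer table (a sketch based on proc-macros.S;
@ the authoritative slot order is struct cpu_cache_fns in
@ <asm/cacheflush.h>):
@
@	.type	v4wt_cache_fns, #object
@ ENTRY(v4wt_cache_fns)
@	.long	v4wt_flush_icache_all
@	.long	v4wt_flush_kern_cache_all
@	.long	v4wt_flush_kern_cache_louis
@	.long	v4wt_flush_user_cache_all
@	.long	v4wt_flush_user_cache_range
@	.long	v4wt_coherent_kern_range
@	.long	v4wt_coherent_user_range
@	.long	v4wt_flush_kern_dcache_area
@	.long	v4wt_dma_map_area
@	.long	v4wt_dma_unmap_area
@	.long	v4wt_dma_flush_range
@	.size	v4wt_cache_fns, . - v4wt_cache_fns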