/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4.S
 *
 *  Copyright (C) 1997-2002 Russell King
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4_flush_icache_all)
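	/*
	 * Presumably nothing to do here: the CPUs served by this file
	 * have a unified cache (or none at all), so there is no
	 * separate icache state to invalidate.
	 */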
	ret	lr
ENDPROC(v4_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v4_flush_user_cache_all)
	/* FALLTHROUGH */
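	/*
	 * ARMv4 has no way to limit cache maintenance to a single
	 * address space, so flushing one mm falls through to the
	 * global flush below.
	 */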
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4_flush_kern_cache_all)
#ifdef CONFIG_CPU_CP15
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
	ret	lr
#else
	/* FALLTHROUGH */
#endif
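	/*
	 * Without CP15 (presumably a cacheless core such as ARM7TDMI)
	 * there is no cache to maintain, and execution falls through
	 * to the stub below.
	 */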

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ flush ID cache
	ret	lr
#else
	/* FALLTHROUGH */
#endif

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_coherent_user_range)
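	/*
	 * Nothing to maintain on a unified cache; return 0 so callers
	 * that check the result see success.
	 */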
	mov	r0, #0
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4_flush_kern_dcache_area)
	/* FALLTHROUGH */

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	 - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_dma_flush_range)
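	/*
	 * The start/end arguments are ignored: the whole ID cache is
	 * flushed (when CP15 is present), which trivially covers the
	 * requested range.
	 */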
#ifdef CONFIG_CPU_CP15
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
#endif
	ret	lr

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4_dma_unmap_area)
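	/*
	 * r2 holds the DMA direction.  On unmap, DMA_FROM_DEVICE and
	 * DMA_BIDIRECTIONAL buffers may hold stale cache lines, so
	 * flush; DMA_TO_DEVICE needs nothing further and falls through
	 * to the no-op below.
	 */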
	teq	r2, #DMA_TO_DEVICE
	bne	v4_dma_flush_range
	/* FALLTHROUGH */

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4_dma_map_area)
	ret	lr
ENDPROC(v4_dma_unmap_area)
ENDPROC(v4_dma_map_area)

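/*
 *	"louis" is flush to the Level of Unification Inner Shareable;
 *	with only a single level of cache on these CPUs, it is simply
 *	aliased to flushing the entire cache.
 */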
	.globl	v4_flush_kern_cache_louis
	.equ	v4_flush_kern_cache_louis, v4_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4
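
/*
 * A rough sketch of what "define_cache_functions v4" is expected to
 * expand to, assuming the usual struct cpu_cache_fns layout (the
 * authoritative definitions are in proc-macros.S and
 * <asm/cacheflush.h>):
 *
 *	ENTRY(v4_cache_fns)
 *		.long	v4_flush_icache_all
 *		.long	v4_flush_kern_cache_all
 *		.long	v4_flush_kern_cache_louis
 *		.long	v4_flush_user_cache_all
 *		.long	v4_flush_user_cache_range
 *		.long	v4_coherent_kern_range
 *		.long	v4_coherent_user_range
 *		.long	v4_flush_kern_dcache_area
 *		.long	v4_dma_map_area
 *		.long	v4_dma_unmap_area
 *		.long	v4_dma_flush_range
 *	.size	v4_cache_fns, . - v4_cache_fns
 */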