^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * linux/arch/arm/mm/cache-fa.S
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2005 Faraday Corp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Based on cache-v4wb.S:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Copyright (C) 1997-2002 Russell King
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * Processors: FA520 FA526 FA626
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/linkage.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <asm/assembler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <asm/memory.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include "proc-macros.S"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * The size of one data cache line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #define CACHE_DLINESIZE 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * The total size of the data cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #ifdef CONFIG_ARCH_GEMINI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define CACHE_DSIZE 8192
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define CACHE_DSIZE 16384
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) /* FIXME: put optimal value here. Current one is just estimation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define CACHE_DLIMIT (CACHE_DSIZE * 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * flush_icache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * Unconditionally clean and invalidate the entire icache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) ENTRY(fa_flush_icache_all)
@ r0 is a scratch zero for the CP15 write (the register value written by
@ "invalidate entire I-cache" is ignored by the hardware).  Clobbers r0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) mov r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) ret lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) ENDPROC(fa_flush_icache_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) * flush_user_cache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * Clean and invalidate all cache entries in a particular address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) * space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) ENTRY(fa_flush_user_cache_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) /* FALLTHROUGH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * flush_kern_cache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * Clean and invalidate the entire cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) ENTRY(fa_flush_kern_cache_all)
@ ip = 0: scratch zero for the CP15 writes below.
@ r2 = VM_EXEC: forces the I-side maintenance in the shared tail to run.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) mov ip, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) mov r2, #VM_EXEC
@ Shared tail: also branched to from fa_flush_user_cache_range (over-limit
@ path) with the caller's r2 holding the vma flags, so the I-cache/BTB/
@ barrier sequence is skipped for non-executable address spaces.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) __flush_whole_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) mcr p15, 0, ip, c7, c14, 0 @ clean/invalidate D cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) tst r2, #VM_EXEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) ret lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) * flush_user_cache_range(start, end, flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * Invalidate a range of cache entries in the specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) * address space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) * - start - start address (inclusive, page aligned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) * - end - end address (exclusive, page aligned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * - flags - vm_area_struct flags describing address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) ENTRY(fa_flush_user_cache_range)
@ r0 = start, r1 = end (exclusive), r2 = vma flags; ip = scratch zero.
@ If the range is at least CACHE_DLIMIT bytes it is cheaper to flush the
@ whole cache than to walk it line by line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) mov ip, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) sub r3, r1, r0 @ calculate total size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) cmp r3, #CACHE_DLIMIT @ total size >= limit?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) bhs __flush_whole_cache @ flush whole D cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
@ Per-line walk: clean+invalidate each D line; the I line is invalidated
@ too only for executable mappings (VM_EXEC set in r2).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) 1: tst r2, #VM_EXEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) mcrne p15, 0, r0, c7, c5, 1 @ invalidate I line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) add r0, r0, #CACHE_DLINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) cmp r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) blo 1b
@ BTB invalidate + barriers are only needed when instructions may have
@ changed, i.e. for executable mappings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) tst r2, #VM_EXEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) mcrne p15, 0, ip, c7, c10, 4 @ data write barrier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) ret lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) * coherent_kern_range(start, end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) * Ensure coherency between the Icache and the Dcache in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) * region described by start. If you have non-snooping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) * Harvard caches, you need to implement this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) * - start - virtual start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) * - end - virtual end address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) ENTRY(fa_coherent_kern_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) /* fall through */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * coherent_user_range(start, end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) * Ensure coherency between the Icache and the Dcache in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) * region described by start. If you have non-snooping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) * Harvard caches, you need to implement this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) * - start - virtual start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) * - end - virtual end address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) ENTRY(fa_coherent_user_range)
@ Round start down to a cache-line boundary so the first partial line is
@ covered, then push each D line to memory and drop the stale I line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) bic r0, r0, #CACHE_DLINESIZE - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 1: mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) add r0, r0, #CACHE_DLINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) cmp r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) blo 1b
@ r0 = 0 here serves both as the CP15 scratch value and as the function's
@ return value on exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) mov r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) mcr p15, 0, r0, c7, c5, 4 @ prefetch flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) ret lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) * flush_kern_dcache_area(void *addr, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) * Ensure that the data held in the page kaddr is written back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) * to the page in question.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * - addr - kernel address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) * - size - size of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) ENTRY(fa_flush_kern_dcache_area)
@ r0 = addr, r1 = size on entry; convert r1 into the exclusive end address
@ and walk the region one D line at a time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) add r1, r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) add r0, r0, #CACHE_DLINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) cmp r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) blo 1b
@ Whole-I-cache invalidate plus write-buffer drain so any code in the
@ area becomes visible to instruction fetch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) mov r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) ret lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) * dma_inv_range(start, end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) * Invalidate (discard) the specified virtual address range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) * May not write back any entries. If 'start' or 'end'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) * are not cache line aligned, those lines must be written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) * back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) * - start - virtual start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) * - end - virtual end address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) fa_dma_inv_range:
@ Edge handling: if start or end is not line-aligned, the partial line is
@ cleaned+invalidated (not just invalidated) so dirty data belonging to
@ neighbouring buffers on the same line is not discarded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) tst r0, #CACHE_DLINESIZE - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) bic r0, r0, #CACHE_DLINESIZE - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) tst r1, #CACHE_DLINESIZE - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) bic r1, r1, #CACHE_DLINESIZE - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D entry
@ Interior lines are simply discarded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) add r0, r0, #CACHE_DLINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) cmp r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) blo 1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) mov r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) ret lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) * dma_clean_range(start, end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) * Clean (write back) the specified virtual address range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) * - start - virtual start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) * - end - virtual end address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) fa_dma_clean_range:
@ Write back (without invalidating) every D line from the rounded-down
@ start up to (exclusive) end, then drain the write buffer so the data
@ has reached memory before DMA starts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) bic r0, r0, #CACHE_DLINESIZE - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) add r0, r0, #CACHE_DLINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) cmp r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) blo 1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) mov r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) ret lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) * dma_flush_range(start,end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) * - start - virtual start address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) * - end - virtual end address of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) ENTRY(fa_dma_flush_range)
@ Clean+invalidate every D line in [start, end): combination of
@ fa_dma_clean_range and fa_dma_inv_range in a single pass.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) bic r0, r0, #CACHE_DLINESIZE - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) add r0, r0, #CACHE_DLINESIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) cmp r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) blo 1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) mov r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) ret lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) * dma_map_area(start, size, dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) * - start - kernel virtual start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) * - size - size of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) * - dir - DMA direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) ENTRY(fa_dma_map_area)
@ Convert (start, size) to (start, end) then tail-call the maintenance
@ routine chosen by the DMA direction in r2:
@   r2 == DMA_TO_DEVICE  -> clean only (CPU wrote, device will read)
@   r2 >  DMA_TO_DEVICE  -> invalidate (presumably DMA_FROM_DEVICE; enum
@                           ordering assumed from <linux/dma-direction.h>)
@   r2 <  DMA_TO_DEVICE  -> clean + invalidate (bidirectional)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) add r1, r1, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) cmp r2, #DMA_TO_DEVICE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) beq fa_dma_clean_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) bcs fa_dma_inv_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) b fa_dma_flush_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) ENDPROC(fa_dma_map_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) * dma_unmap_area(start, size, dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) * - start - kernel virtual start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) * - size - size of region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) * - dir - DMA direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) ENTRY(fa_dma_unmap_area)
@ No-op: all required maintenance for this CPU is performed at map time
@ by fa_dma_map_area above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) ret lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) ENDPROC(fa_dma_unmap_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
@ flush_kern_cache_louis (flush to Level Of Unification Inner Shareable)
@ is simply aliased to the full cache flush on this single-level cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) .globl fa_flush_kern_cache_louis
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) .equ fa_flush_kern_cache_louis, fa_flush_kern_cache_all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) __INITDATA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) define_cache_functions fa