^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * This file is subject to the terms and conditions of the GNU General Public
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * License. See the file "COPYING" in the main directory of this archive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * (C) 2001 - 2013 Tensilica Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #ifndef _XTENSA_CACHEFLUSH_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #define _XTENSA_CACHEFLUSH_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) /*
 * Low-level routines for cache flushing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * invalidate data or instruction cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * __invalidate_icache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * __invalidate_icache_page(adr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * __invalidate_dcache_page(adr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * __invalidate_icache_range(from,size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * __invalidate_dcache_range(from,size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * flush data cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * __flush_dcache_page(adr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * flush and invalidate data cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * __flush_invalidate_dcache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * __flush_invalidate_dcache_page(adr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * __flush_invalidate_dcache_range(from,size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * specials for cache aliasing:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * __flush_invalidate_dcache_page_alias(vaddr,paddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * __invalidate_dcache_page_alias(vaddr,paddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * __invalidate_icache_page_alias(vaddr,paddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) extern void __invalidate_dcache_all(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) extern void __invalidate_icache_all(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) extern void __invalidate_dcache_page(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) extern void __invalidate_icache_page(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) extern void __invalidate_icache_range(unsigned long, unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) extern void __invalidate_dcache_range(unsigned long, unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #if XCHAL_DCACHE_IS_WRITEBACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) extern void __flush_invalidate_dcache_all(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) extern void __flush_dcache_page(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) extern void __flush_dcache_range(unsigned long, unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) extern void __flush_invalidate_dcache_page(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #else
/*
 * No-op stub: this branch is compiled when the dcache is not write-back
 * (!XCHAL_DCACHE_IS_WRITEBACK), so there is never dirty data to write
 * back and a "flush" has nothing to do.
 */
static inline void __flush_dcache_page(unsigned long va)
{
}
/*
 * No-op stub for the non-write-back dcache configuration: a write-through
 * cache holds no dirty lines, so flushing a range has nothing to do.
 */
static inline void __flush_dcache_range(unsigned long va, unsigned long sz)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) # define __flush_invalidate_dcache_all() __invalidate_dcache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) # define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) # define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #else
/*
 * No-op stub: compiled when there is no MMU or DCACHE_WAY_SIZE <= PAGE_SIZE,
 * i.e. the dcache cannot alias, so per-alias maintenance is unnecessary.
 */
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
						unsigned long phys) { }
/*
 * No-op stub: with no dcache aliasing possible (no MMU, or
 * DCACHE_WAY_SIZE <= PAGE_SIZE) there is no alias mapping to invalidate.
 */
static inline void __invalidate_dcache_page_alias(unsigned long virt,
						unsigned long phys) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #else
/*
 * No-op stub: compiled when the icache cannot alias (no MMU, or
 * ICACHE_WAY_SIZE <= PAGE_SIZE), so no alias invalidation is needed.
 */
static inline void __invalidate_icache_page_alias(unsigned long virt,
						unsigned long phys) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * We have physically tagged caches - nothing to do here -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) * unless we have cache aliasing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * Pages can get remapped. Because this might change the 'color' of that page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * we have to flush the cache before the PTE is changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * (see also Documentation/core-api/cachetlb.rst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) #if defined(CONFIG_MMU) && \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) ((DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) void flush_cache_all(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) void flush_cache_range(struct vm_area_struct*, ulong, ulong);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) void flush_icache_range(unsigned long start, unsigned long end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) void flush_cache_page(struct vm_area_struct*,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) unsigned long, unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #define flush_cache_all local_flush_cache_all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #define flush_cache_range local_flush_cache_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define flush_icache_range local_flush_icache_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #define flush_cache_page local_flush_cache_page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define local_flush_cache_all() \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) __flush_invalidate_dcache_all(); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) __invalidate_icache_all(); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) #define flush_cache_mm(mm) flush_cache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) #define flush_cache_vmap(start,end) flush_cache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) #define flush_cache_vunmap(start,end) flush_cache_all()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) extern void flush_dcache_page(struct page*);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) void local_flush_cache_range(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) unsigned long start, unsigned long end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) void local_flush_cache_page(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) unsigned long address, unsigned long pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) #define flush_cache_all() do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) #define flush_cache_mm(mm) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) #define flush_cache_dup_mm(mm) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) #define flush_cache_vmap(start,end) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) #define flush_cache_vunmap(start,end) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) #define flush_dcache_page(page) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) #define flush_icache_range local_flush_icache_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) #define flush_cache_page(vma, addr, pfn) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) #define flush_cache_range(vma, start, end) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) #define flush_icache_user_range flush_icache_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) /* Ensure consistency between data and instruction cache. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) #define local_flush_icache_range(start, end) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) __flush_dcache_range(start, (end) - (start)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) __invalidate_icache_range(start,(end) - (start)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) /* This is not required, see Documentation/core-api/cachetlb.rst */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) #define flush_icache_page(vma,page) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) #define flush_dcache_mmap_lock(mapping) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) #define flush_dcache_mmap_unlock(mapping) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) #if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) extern void copy_to_user_page(struct vm_area_struct*, struct page*,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) unsigned long, void*, const void*, unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) extern void copy_from_user_page(struct vm_area_struct*, struct page*,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) unsigned long, void*, const void*, unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) memcpy(dst, src, len); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) __flush_dcache_range((unsigned long) dst, len); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) __invalidate_icache_range((unsigned long) dst, len); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) memcpy(dst, src, len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) #endif /* _XTENSA_CACHEFLUSH_H */