/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_CACHEFLUSH_H
#define _ASM_GENERIC_CACHEFLUSH_H

struct mm_struct;
struct vm_area_struct;
struct page;
struct address_space;

/*
 * The cache doesn't need to be flushed when TLB entries change if
 * the cache is mapped to physical memory rather than virtual memory.
 */
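/*
 * Each hook below is an empty fallback, used only if the architecture
 * does not provide its own version and define the matching macro in
 * its asm/cacheflush.h (e.g. "#define flush_cache_all flush_cache_all").
 *
 * flush_cache_all() flushes the entire CPU cache; it is needed when
 * kernel page tables change in a way that affects every address space.
 */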
#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
}
#endif

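/*
 * flush_cache_mm() flushes all cached user mappings of an address
 * space, typically just before its page tables are torn down.
 */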
#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
#endif

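/*
 * flush_cache_dup_mm() is the fork-time variant of flush_cache_mm();
 * it is a separate hook so that architectures with virtually indexed
 * caches can optimize the address-space duplication case.
 */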
#ifndef flush_cache_dup_mm
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
#endif

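/*
 * flush_cache_range() flushes the cached user mappings in the given
 * virtual address range of the VMA before the corresponding page
 * table entries are changed or removed.
 */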
#ifndef flush_cache_range
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
#endif

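/*
 * flush_cache_page() is the single-page counterpart of
 * flush_cache_range(); the pfn identifies the backing physical page
 * so that aliasing caches can find all of its mappings.
 */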
#ifndef flush_cache_page
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
#endif

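/*
 * flush_dcache_page() is called when the kernel reads from or writes
 * to a page-cache page, so that the kernel mapping stays coherent with
 * any user-space D-cache aliases.  Defining the macro to 0 tells
 * generic code that no architecture implementation exists.
 */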
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static inline void flush_dcache_page(struct page *page)
{
}
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#endif

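/*
 * flush_dcache_mmap_lock()/flush_dcache_mmap_unlock() protect a
 * mapping's i_mmap tree while an architecture walks it looking for
 * user aliases of a page.
 */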
#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
#endif

#ifndef flush_dcache_mmap_unlock
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
#endif

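/*
 * flush_icache_range() makes the I-cache coherent with the D-cache
 * after the kernel writes instructions into a kernel address range,
 * e.g. when loading a module or patching code.
 */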
#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
#endif

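/*
 * flush_icache_user_range() does the same for a user address range;
 * by default it simply aliases flush_icache_range().
 */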
#ifndef flush_icache_user_range
#define flush_icache_user_range flush_icache_range
#endif

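/*
 * flush_icache_page() has historically been called when a potentially
 * executable page is mapped into user space, to keep the I-cache
 * coherent for it.
 */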
#ifndef flush_icache_page
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
}
#endif

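/*
 * flush_icache_user_page() makes the I-cache coherent after the kernel
 * writes 'len' bytes at 'addr' into a page that is mapped in user
 * space; it is invoked from copy_to_user_page() below.
 */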
#ifndef flush_icache_user_page
static inline void flush_icache_user_page(struct vm_area_struct *vma,
					  struct page *page,
					  unsigned long addr, int len)
{
}
#endif

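/*
 * flush_cache_vmap() is called after a vmalloc/ioremap mapping has
 * been installed, and flush_cache_vunmap() before one is torn down.
 */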
#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
#endif

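/*
 * copy_to_user_page() writes data into a user page through its kernel
 * mapping (e.g. when ptrace modifies another process's text) and then
 * flushes the I-cache in case the written range contains instructions.
 */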
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_page(vma, page, vaddr, len); \
	} while (0)
#endif

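/*
 * copy_from_user_page() is the read direction; the generic version
 * assumes the kernel mapping already observes coherent data, so a
 * plain memcpy suffices.
 */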
#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
#endif

#endif /* _ASM_GENERIC_CACHEFLUSH_H */