^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) #ifndef _ASM_POWERPC_CACHEFLUSH_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #define _ASM_POWERPC_CACHEFLUSH_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <asm/cputable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <asm/cpu_has_feature.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #ifdef CONFIG_PPC_BOOK3S_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * Book3s has no ptesync after setting a pte, so without this ptesync it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * possible for a kernel virtual mapping access to return a spurious fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * if it's accessed right after the pte is set. The page fault handler does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * not expect this type of fault. flush_cache_vmap is not exactly the right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * place to put this, but it seems to work well enough.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	/*
	 * See the comment above: emit a ptesync so that a subsequent access
	 * through the freshly-created vmap mapping cannot take a spurious
	 * fault. The start/end arguments are intentionally unused — the
	 * barrier covers all preceding pte updates, not a specific range.
	 */
	asm volatile("ptesync" ::: "memory");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #define flush_cache_vmap flush_cache_vmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #endif /* CONFIG_PPC_BOOK3S_64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) extern void flush_dcache_page(struct page *page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) void flush_icache_range(unsigned long start, unsigned long stop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define flush_icache_range flush_icache_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) unsigned long addr, int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define flush_icache_user_page flush_icache_user_page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) void flush_dcache_icache_page(struct page *page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) void __flush_dcache_icache(void *page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * flush_dcache_range(): Write any modified data cache blocks out to memory and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * invalidate them. Does not invalidate the corresponding instruction cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * @start: the start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * @stop: the stop address (exclusive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) static inline void flush_dcache_range(unsigned long start, unsigned long stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) unsigned long shift = l1_dcache_shift();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) unsigned long bytes = l1_dcache_bytes();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) void *addr = (void *)(start & ~(bytes - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) unsigned long size = stop - (unsigned long)addr + (bytes - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) if (IS_ENABLED(CONFIG_PPC64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) mb(); /* sync */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) for (i = 0; i < size >> shift; i++, addr += bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) dcbf(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) mb(); /* sync */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 */
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long bytes = l1_dcache_bytes();
	unsigned long shift = l1_dcache_shift();
	void *p = (void *)(start & ~(bytes - 1));
	/* Number of cache blocks covering [start, stop), rounded outward. */
	unsigned long nr = (stop - (unsigned long)p + (bytes - 1)) >> shift;

	while (nr--) {
		dcbst(p);	/* write back one block; line stays valid */
		p += bytes;
	}
	mb();	/* sync: ensure all dcbst operations have completed */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
/*
 * Like above, but invalidate the D-cache. This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 */
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	unsigned long bytes = l1_dcache_bytes();
	unsigned long shift = l1_dcache_shift();
	void *p = (void *)(start & ~(bytes - 1));
	/* Number of cache blocks covering [start, stop), rounded outward. */
	unsigned long nr = (stop - (unsigned long)p + (bytes - 1)) >> shift;

	while (nr--) {
		dcbi(p);	/* invalidate one block without writing back */
		p += bytes;
	}
	mb();	/* sync: ensure all dcbi operations have completed */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) #ifdef CONFIG_4xx
static inline void flush_instruction_cache(void)
{
	/*
	 * iccci invalidates the instruction cache; KERNELBASE appears to be
	 * used only as a convenient valid kernel address for the operand —
	 * presumably 4xx invalidates the whole I-cache regardless (TODO:
	 * confirm against the 4xx core manual). isync then discards any
	 * already-prefetched instructions.
	 */
	iccci((void *)KERNELBASE);
	isync();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) void flush_instruction_cache(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) #include <asm-generic/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) #endif /* _ASM_POWERPC_CACHEFLUSH_H */