/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_TLBFLUSH_H
#define _ASM_RISCV_TLBFLUSH_H

#include <linux/mm_types.h>
#include <asm/smp.h>
#ifdef CONFIG_MMU
/* Flush all entries from the local TLB */
static inline void local_flush_tlb_all(void)
{
	__asm__ __volatile__ ("sfence.vma" : : : "memory");
}

/* Flush one page from the local TLB */
static inline void local_flush_tlb_page(unsigned long addr)
{
	__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
}
#else /* CONFIG_MMU */
#define local_flush_tlb_all()			do { } while (0)
#define local_flush_tlb_page(addr)		do { } while (0)
#endif /* CONFIG_MMU */
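
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a hypothetical helper that flushes a small range from the local TLB one
 * page at a time, rather than paying for a full local_flush_tlb_all().
 * Assumes PAGE_SIZE/PAGE_MASK from <asm/page.h>; the helper name is
 * invented for illustration only.
 */
static inline void __example_local_flush_tlb_range(unsigned long start,
						   unsigned long end)
{
	unsigned long addr;

	/* One sfence.vma per page covering [start, end) */
	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE)
		local_flush_tlb_page(addr);
}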

#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
void flush_tlb_all(void);
void flush_tlb_mm(struct mm_struct *mm);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end);
#else /* CONFIG_SMP && CONFIG_MMU */

#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)

/*
 * No range-wise flush is implemented here, so fall back to flushing
 * the whole local TLB.
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	local_flush_tlb_all();
}

#define flush_tlb_mm(mm) flush_tlb_all()
#endif /* !CONFIG_SMP || !CONFIG_MMU */

/* Flush a range of kernel pages; this port currently flushes the whole TLB */
static inline void flush_tlb_kernel_range(unsigned long start,
	unsigned long end)
{
	flush_tlb_all();
}
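
/*
 * Illustrative usage sketch (editor's addition, hypothetical caller): after
 * kernel page tables for [start, end) have been changed -- for example when
 * a vmap or ioremap region is torn down -- the stale translations must be
 * invalidated before the virtual range can be reused. The wrapper name is
 * invented for illustration only.
 */
static inline void __example_kernel_range_teardown(unsigned long start,
						   unsigned long end)
{
	/* ... caller's kernel page-table updates for [start, end) ... */
	flush_tlb_kernel_range(start, end);	/* then drop stale TLB entries */
}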

#endif /* _ASM_RISCV_TLBFLUSH_H */