/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_TLBFLUSH_H
#define _ALPHA_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/compiler.h>

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif
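
/* GNU89 "extern inline" semantics: callers inline these helpers, but
   no out-of-line definition is emitted here.  Exactly one .c file is
   expected to define __EXTERN_INLINE before including this header,
   suppressing the default above so that real external definitions are
   compiled into that file.  */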

extern void __load_new_mm_context(struct mm_struct *);


/* Use a few helper functions to hide the ugly broken ASN
   numbers on early Alphas (ev4 and ev45).  */

__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
	tbiap();
}

__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
}
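
/* The two flavours differ because EV4/EV45 ASN handling is too broken
   to rely on (see the comment above): loading a new context is not
   enough to retire the old translations, so the ev4 variant also
   issues tbiap() to drop every process-local TLB entry.  On EV5 and
   later, the fresh ASN from __load_new_mm_context() does the job by
   itself.  */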

/* Flush just one page in the current TLB set.  We need to be very
   careful about the icache here, there is no way to invalidate a
   specific icache page.  */

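/* For reference, the PALcode TBI types used below: tbi(1, addr)
   invalidates a single ITB entry, tbi(2, addr) a single DTB entry,
   and tbi(3, addr) both; tbiap() and tbia() issue TBI with types -1
   and -2, flushing all process-local entries and all entries
   respectively.  */
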
__EXTERN_INLINE void
ev4_flush_tlb_current_page(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	int tbi_flag = 2;
	if (vma->vm_flags & VM_EXEC) {
		__load_new_mm_context(mm);
		tbi_flag = 3;
	}
	tbi(tbi_flag, addr);
}

__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	if (vma->vm_flags & VM_EXEC)
		__load_new_mm_context(mm);
	else
		tbi(2, addr);
}


#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page		alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
#  define flush_tlb_current		ev4_flush_tlb_current
#  define flush_tlb_current_page	ev4_flush_tlb_current_page
# else
#  define flush_tlb_current		ev5_flush_tlb_current
#  define flush_tlb_current_page	ev5_flush_tlb_current_page
# endif
#endif
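
/* A CONFIG_ALPHA_GENERIC kernel supports several CPU generations in
   one image, so the flush implementation is chosen at run time
   through the alpha_mv machine vector; CPU-specific kernels bind the
   ev4 or ev5 flavour directly at compile time.  */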

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

/* Flush current user mapping.  */
static inline void
flush_tlb(void)
{
	flush_tlb_current(current->active_mm);
}

/* Flush someone else's user mapping.  */
static inline void
flush_tlb_other(struct mm_struct *mm)
{
	unsigned long *mmc = &mm->context[smp_processor_id()];
	/* Check it's not zero first to avoid cacheline ping pong
	   when possible.  */
	if (*mmc) *mmc = 0;
}
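
/* Note that flush_tlb_other() never touches the TLB itself:
   mm->context[] caches the per-CPU ASN for the mm, and zeroing the
   slot merely marks it stale, so the next time this CPU switches to
   that mm, __load_new_mm_context() must allocate a fresh ASN and the
   old translations can no longer match.  */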

#ifndef CONFIG_SMP
/* Flush everything (kernel mapping may also have changed
   due to vmalloc/vfree).  */
static inline void flush_tlb_all(void)
{
	tbia();
}

/* Flush a specified user mapping.  */
static inline void
flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

/* Page-granular tlb flush.  */
static inline void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm == current->active_mm)
		flush_tlb_current_page(mm, vma, addr);
	else
		flush_tlb_other(mm);
}
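
/* Illustrative call pattern (a sketch, not code from this file):
   generic mm code that has just rewritten a user PTE does roughly

	set_pte(ptep, pte);
	flush_tlb_page(vma, addr);

   which takes the flush_tlb_current_page() path when the mm is live
   on this CPU, and otherwise just invalidates the remembered ASN via
   flush_tlb_other().  */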

/* Flush a specified range of a user mapping.  There is no ranged
   TBI operation on Alpha, so we simply flush the whole user TLB.  */
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}

#else /* CONFIG_SMP */

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
			    unsigned long);

#endif /* CONFIG_SMP */

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}
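
/* Kernel page-table mappings are created with the ASM (address space
   match) bit set and thus survive the per-process tbiap(); flushing a
   kernel range after vmalloc/vfree changes therefore falls back to
   the full flush_tlb_all().  */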

#endif /* _ALPHA_TLBFLUSH_H */