/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_TLBFLUSH_H
#define _M68K_TLBFLUSH_H

#ifdef CONFIG_MMU
#ifndef CONFIG_SUN3

#include <asm/current.h>
#include <asm/mcfmmu.h>

static inline void flush_tlb_kernel_page(void *addr)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		/*
		 * On '040/'060 pflush takes its address space from DFC, so
		 * temporarily switch to the kernel segment to make sure the
		 * kernel mapping of @addr is the one being flushed.
		 */
		mm_segment_t old_fs = get_fs();

		set_fs(KERNEL_DS);
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflush (%0)\n\t"
				     ".chip 68k"
				     : : "a" (addr));
		set_fs(old_fs);
	} else if (CPU_IS_020_OR_030)
		__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
}
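
/*
 * Usage sketch (illustrative only, not part of this header): a hypothetical
 * caller that rewrites a kernel PTE and then drops any cached translation
 * for that address.  The helper name and how the new PTE value is obtained
 * are made up for the sketch; only the flush_tlb_kernel_page() call is the
 * interface defined above.
 *
 *	static void example_remap_kernel_page(pte_t *ptep, pte_t newval,
 *					      void *kaddr)
 *	{
 *		set_pte(ptep, newval);		// install the new mapping
 *		flush_tlb_kernel_page(kaddr);	// invalidate the stale ATC entry
 *	}
 */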

/*
 * flush all user-space atc entries.
 */
static inline void __flush_tlb(void)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflushan\n\t"
				     ".chip 68k");
	} else if (CPU_IS_020_OR_030) {
		__asm__ __volatile__("pflush #0,#4");
	}
}
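
/*
 * Note on the 68020/68030 pflush forms used above, stated here as background
 * rather than as part of this interface: "pflush #fc,#mask[,(ea)]" invalidates
 * ATC entries whose function code matches #fc under #mask.  Bit 2 of the
 * function code distinguishes supervisor (set) from user (clear) accesses, so
 * "#4,#4,(%0)" selects supervisor entries for one address, while "#0,#4"
 * selects all user-space entries.
 */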

static inline void __flush_tlb040_one(unsigned long addr)
{
	__asm__ __volatile__(".chip 68040\n\t"
			     "pflush (%0)\n\t"
			     ".chip 68k"
			     : : "a" (addr));
}

static inline void __flush_tlb_one(unsigned long addr)
{
	if (CPU_IS_COLDFIRE)
		mmu_write(MMUOR, MMUOR_CNL);
	else if (CPU_IS_040_OR_060)
		__flush_tlb040_one(addr);
	else if (CPU_IS_020_OR_030)
		__asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
}

#define flush_tlb() __flush_tlb()

/*
 * flush all atc entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflusha\n\t"
				     ".chip 68k");
	} else if (CPU_IS_020_OR_030) {
		__asm__ __volatile__("pflusha");
	}
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	if (vma->vm_mm == current->active_mm) {
		/* switch to the user address space so the flush hits it */
		mm_segment_t old_fs = force_uaccess_begin();

		__flush_tlb_one(addr);
		force_uaccess_end(old_fs);
	}
}
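
/*
 * Usage sketch (illustrative only): how a caller would typically pair a user
 * PTE update with a single-page flush.  The helper name is hypothetical; in
 * practice the generic mm code performs the equivalent steps around
 * flush_tlb_page().
 *
 *	static void example_update_user_pte(struct vm_area_struct *vma,
 *					    unsigned long uaddr,
 *					    pte_t *ptep, pte_t newval)
 *	{
 *		set_pte_at(vma->vm_mm, uaddr, ptep, newval);
 *		flush_tlb_page(vma, uaddr);	// drop the old translation
 *	}
 */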

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}

#else /* CONFIG_SUN3 */

/* Reserved PMEGs. */
extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
extern unsigned long pmeg_vaddr[SUN3_PMEGS_NUM];
extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];
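
/*
 * Rough bookkeeping for the Sun-3 segment map (one entry per PMEG, the group
 * of pages the MMU maps as a unit): pmeg_alloc flags PMEGs that are currently
 * handed out, pmeg_ctx records the context a PMEG belongs to, and pmeg_vaddr
 * the virtual address it maps.  The flush routines below reset these entries
 * alongside the hardware segment map.
 */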

/* Flush all userspace mappings one by one... (why no flush command,
   sun?) */
static inline void flush_tlb_all(void)
{
	unsigned long addr;
	unsigned char ctx, oldctx;

	oldctx = sun3_get_context();
	for (addr = 0x00000000; addr < TASK_SIZE; addr += SUN3_PMEG_SIZE) {
		for (ctx = 0; ctx < 8; ctx++) {
			sun3_put_context(ctx);
			sun3_put_segmap(addr, SUN3_INVALID_PMEG);
		}
	}

	sun3_put_context(oldctx);
	/* erase all of the userspace pmeg maps, we've clobbered them
	   all anyway */
	for (addr = 0; addr < SUN3_INVALID_PMEG; addr++) {
		if (pmeg_alloc[addr] == 1) {
			pmeg_alloc[addr] = 0;
			pmeg_ctx[addr] = 0;
			pmeg_vaddr[addr] = 0;
		}
	}
}

/* Clear user TLB entries within the context named in mm */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned char oldctx;
	unsigned char seg;
	unsigned long i;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

	for (i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE) {
		seg = sun3_get_segmap(i);
		if (seg == SUN3_INVALID_PMEG)
			continue;

		sun3_put_segmap(i, SUN3_INVALID_PMEG);
		pmeg_alloc[seg] = 0;
		pmeg_ctx[seg] = 0;
		pmeg_vaddr[seg] = 0;
	}

	sun3_put_context(oldctx);
}

/* Flush a single TLB page.  In this case, we're limited to flushing a
   single PMEG; see the worked example below. */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	unsigned char oldctx;
	unsigned char i;

	oldctx = sun3_get_context();
	sun3_put_context(vma->vm_mm->context);
	addr &= ~SUN3_PMEG_MASK;
	i = sun3_get_segmap(addr);
	if (i != SUN3_INVALID_PMEG) {
		pmeg_alloc[i] = 0;
		pmeg_ctx[i] = 0;
		pmeg_vaddr[i] = 0;
		sun3_put_segmap(addr, SUN3_INVALID_PMEG);
	}
	sun3_put_context(oldctx);
}
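
/*
 * Worked example of the rounding above, assuming the usual Sun-3 segment size
 * of 128 KiB (SUN3_PMEG_SIZE == 0x20000, SUN3_PMEG_MASK == 0x1ffff): flushing
 * addr == 0x0123f000 clears the segmap entry for 0x01220000, i.e. the whole
 * 128 KiB PMEG containing that page, since the Sun-3 MMU cannot invalidate
 * anything smaller than a segment.
 */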

/* Flush a range of pages from TLB. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned char seg, oldctx;

	start &= ~SUN3_PMEG_MASK;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

	while (start < end) {
		seg = sun3_get_segmap(start);
		if (seg == SUN3_INVALID_PMEG)
			goto next;
		if (pmeg_ctx[seg] == mm->context) {
			pmeg_alloc[seg] = 0;
			pmeg_ctx[seg] = 0;
			pmeg_vaddr[seg] = 0;
		}
		sun3_put_segmap(start, SUN3_INVALID_PMEG);
next:
		start += SUN3_PMEG_SIZE;
	}
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}

/* Flush kernel page from TLB. */
static inline void flush_tlb_kernel_page(unsigned long addr)
{
	sun3_put_segmap(addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
}

#endif /* CONFIG_SUN3 */

#else /* !CONFIG_MMU */

/*
 * flush all user-space atc entries.
 */
static inline void __flush_tlb(void)
{
	BUG();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	BUG();
}

#define flush_tlb() __flush_tlb()

/*
 * flush all atc entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
	BUG();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	BUG();
}

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	BUG();
}

static inline void flush_tlb_range(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	BUG();
}

static inline void flush_tlb_kernel_page(unsigned long addr)
{
	BUG();
}

#endif /* CONFIG_MMU */

#endif /* _M68K_TLBFLUSH_H */