// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
#include <asm/setup.h>

/*
 * One C-SKY MMU TLB entry contains two PFN/page entries, i.e.:
 * 1 VPN -> 2 PFN
 */
#define TLB_ENTRY_SIZE (PAGE_SIZE * 2)
#define TLB_ENTRY_SIZE_MASK (PAGE_MASK << 1)

void flush_tlb_all(void)
{
	tlb_invalid_all();
}

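/*
 * Flush all user-space TLB entries belonging to @mm. With hardware TLBI
 * the entries tagged with the mm's ASID are invalidated directly;
 * without it the whole TLB is dropped.
 */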
void flush_tlb_mm(struct mm_struct *mm)
{
#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm)));
#else
	tlb_invalid_all();
#endif
}

/*
 * The MMU operation registers can only invalidate TLB entries in the
 * jTLB, so the ASID field must be rewritten to invalidate the I-uTLB
 * and D-uTLB. When the old and new ASIDs are equal, a dummy write of
 * oldpid + 1 forces the uTLBs to be invalidated before the old ASID
 * is restored.
 */
#ifndef CONFIG_CPU_HAS_TLBI
#define restore_asid_inv_utlb(oldpid, newpid) \
do { \
	if (oldpid == newpid) \
		write_mmu_entryhi(oldpid + 1); \
	write_mmu_entryhi(oldpid); \
} while (0)
#endif

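/*
 * Flush the user-space TLB entries of @vma's mm that cover
 * [start, end). The range is first aligned to TLB_ENTRY_SIZE, since
 * each entry maps two pages.
 */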
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	unsigned long newpid = cpu_asid(vma->vm_mm);

	start &= TLB_ENTRY_SIZE_MASK;
	end += TLB_ENTRY_SIZE - 1;
	end &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	while (start < end) {
		asm volatile("tlbi.vas %0"::"r"(start | newpid));
		start += 2*PAGE_SIZE;
	}
	sync_is();
#else
	{
	unsigned long flags, oldpid;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	while (start < end) {
		int idx;

		write_mmu_entryhi(start | newpid);
		start += 2*PAGE_SIZE;
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
	}
	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}

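/*
 * Flush the TLB entries covering the kernel range [start, end). Unlike
 * flush_tlb_range(), no per-mm ASID is mixed into the address handed
 * to the hardware.
 */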
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= TLB_ENTRY_SIZE_MASK;
	end += TLB_ENTRY_SIZE - 1;
	end &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	while (start < end) {
		asm volatile("tlbi.vaas %0"::"r"(start));
		start += 2*PAGE_SIZE;
	}
	sync_is();
#else
	{
	unsigned long flags, oldpid;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	while (start < end) {
		int idx;

		write_mmu_entryhi(start | oldpid);
		start += 2*PAGE_SIZE;
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
	}
	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}

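/*
 * Flush the single TLB entry of @vma's mm that covers @addr.
 */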
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	int newpid = cpu_asid(vma->vm_mm);

	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.vas %0"::"r"(addr | newpid));
	sync_is();
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	write_mmu_entryhi(addr | newpid);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();

	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}

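/*
 * Flush the TLB entry covering @addr. As in flush_tlb_kernel_range(),
 * no mm/ASID is supplied by the caller.
 */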
void flush_tlb_one(unsigned long addr)
{
	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.vaas %0"::"r"(addr));
	sync_is();
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	write_mmu_entryhi(addr | oldpid);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();

	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}
EXPORT_SYMBOL(flush_tlb_one);