^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) // Copyright (C) 2005-2017 Andes Technology Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <asm/proc-fns.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <asm/shmparam.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <asm/cache_info.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) extern struct cache_info L1_cache_info[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) void flush_icache_range(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) unsigned long line_size, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) line_size = L1_cache_info[DCACHE].line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) start = start & ~(line_size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) end = (end + line_size - 1) & ~(line_size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) cpu_cache_wbinval_range(start, end, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) EXPORT_SYMBOL(flush_icache_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) void flush_icache_page(struct vm_area_struct *vma, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) unsigned long kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) kaddr = (unsigned long)kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) kunmap_atomic((void *)kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) unsigned long addr, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) unsigned long kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) kaddr = (unsigned long)kmap_atomic(page) + (addr & ~PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) flush_icache_range(kaddr, kaddr + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) kunmap_atomic((void *)kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
/*
 * Called after a PTE for @addr has been installed: preload the TLB
 * entry and perform any cache maintenance the new mapping needs.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
		      pte_t * pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(*pte);
	unsigned long flags;

	if (!pfn_valid(pfn))
		return;

	/* Refill the TLB only when the mapping belongs to the mm that is
	 * currently active on this CPU. */
	if (vma->vm_mm == current->active_mm) {
		local_irq_save(flags);
		__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
		__nds32__tlbop_rwr(*pte);
		__nds32__isb();
		local_irq_restore(flags);
	}
	page = pfn_to_page(pfn);

	/* Flush now if a flush was deferred by flush_dcache_page()
	 * (PG_dcache_dirty) or if the mapping is executable. */
	if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
	    (vma->vm_flags & VM_EXEC)) {
		unsigned long kaddr;
		local_irq_save(flags);
		kaddr = (unsigned long)kmap_atomic(page);
		cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
		kunmap_atomic((void *)kaddr);
		local_irq_restore(flags);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #ifdef CONFIG_CPU_CACHE_ALIASING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) extern pte_t va_present(struct mm_struct *mm, unsigned long addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) static inline unsigned long aliasing(unsigned long addr, unsigned long page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
/*
 * Map physical address @pa at the fixed kernel window BASE_ADDR0 using
 * a locked TLB entry whose cache-colour bits match user address @uaddr.
 * Returns the kernel virtual address; tear down with kunmap01().
 * NOTE(review): all callers in this file hold IRQs off around the
 * kremap0()/kunmap01() pair, since the window is a single shared slot.
 */
static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa)
{
	unsigned long kaddr, pte;

#define BASE_ADDR0 0xffffc000
	/* Choose the alias slot carrying the same colour bits as @uaddr. */
	kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
	pte = (pa | PAGE_KERNEL);
	__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
	__nds32__tlbop_rwlk(pte);	/* write the TLB entry and lock it */
	__nds32__isb();
	return kaddr;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
/*
 * Undo a mapping made by kremap0()/kremap1(): unlock the locked TLB
 * entry covering @kaddr, then invalidate it.
 */
static inline void kunmap01(unsigned long kaddr)
{
	__nds32__tlbop_unlk(kaddr);
	__nds32__tlbop_inv(kaddr);
	__nds32__isb();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
/*
 * Second colour-matched mapping window (BASE_ADDR1), identical to
 * kremap0() but at a different fixed address so two pages can be
 * mapped simultaneously (see copy_user_highpage()).
 * Tear down with kunmap01(); callers hold IRQs off around the pair.
 */
static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa)
{
	unsigned long kaddr, pte;

#define BASE_ADDR1 0xffff8000
	kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
	pte = (pa | PAGE_KERNEL);
	__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
	__nds32__tlbop_rwlk(pte);	/* write the TLB entry and lock it */
	__nds32__isb();
	return kaddr;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
/*
 * The caches carry no mm tag, so flushing one address space means
 * flushing everything.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	unsigned long irq_state;

	local_irq_save(irq_state);
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
	local_irq_restore(irq_state);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
/* Nothing to do when an mm is duplicated (fork). */
void flush_cache_dup_mm(struct mm_struct *mm)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) void flush_cache_range(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) if ((end - start) > 8 * PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) cpu_dcache_wbinval_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) if (vma->vm_flags & VM_EXEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) cpu_icache_inval_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) while (start < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) if (va_present(vma->vm_mm, start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) start += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) void flush_cache_page(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) unsigned long addr, unsigned long pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) unsigned long vto, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) vto = kremap0(addr, pfn << PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) kunmap01(vto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
/* New vmalloc mappings may alias arbitrary colours: flush everything. */
void flush_cache_vmap(unsigned long start, unsigned long end)
{
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
/* Mirror of flush_cache_vmap() for teardown of vmalloc mappings. */
void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
/*
 * Copy a user page between two kernel mappings, flushing the user
 * alias before the copy and the destination alias afterwards.
 */
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *to)
{
	unsigned long uaddr = (unsigned long)vaddr;
	unsigned long daddr = (unsigned long)vto;

	/* Drop stale lines at the user colour before copying. */
	cpu_dcache_wbinval_page(uaddr);
	cpu_icache_inval_page(uaddr);
	copy_page(vto, vfrom);
	/* Push the fresh data out of the destination alias too. */
	cpu_dcache_wbinval_page(daddr);
	cpu_icache_inval_page(daddr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
/*
 * Zero a user page, keeping both the user-coloured alias and the
 * kernel mapping coherent.
 */
void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	unsigned long uaddr = (unsigned long)vaddr;
	unsigned long kvirt = (unsigned long)addr;

	cpu_dcache_wbinval_page(uaddr);
	cpu_icache_inval_page(uaddr);
	clear_page(addr);
	cpu_dcache_wbinval_page(kvirt);
	cpu_icache_inval_page(kvirt);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) void copy_user_highpage(struct page *to, struct page *from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) unsigned long vaddr, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) kto = ((unsigned long)page_address(to) & PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) pto = page_to_phys(to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) pfrom = page_to_phys(from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) if (aliasing(vaddr, (unsigned long)kfrom))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) cpu_dcache_wb_page((unsigned long)kfrom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) vto = kremap0(vaddr, pto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) vfrom = kremap1(vaddr, pfrom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) copy_page((void *)vto, (void *)vfrom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) kunmap01(vfrom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) kunmap01(vto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) EXPORT_SYMBOL(copy_user_highpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) void clear_user_highpage(struct page *page, unsigned long vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) unsigned long vto, flags, kto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) kto = ((unsigned long)page_address(page) & PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) if (aliasing(kto, vaddr) && kto != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) cpu_dcache_inval_page(kto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) cpu_icache_inval_page(kto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) vto = kremap0(vaddr, page_to_phys(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) clear_page((void *)vto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) kunmap01(vto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) EXPORT_SYMBOL(clear_user_highpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)
/*
 * Keep the D-cache coherent after the kernel writes to a page-cache
 * page. If the page has no user mappings yet, defer the flush via
 * PG_dcache_dirty (picked up later in update_mmu_cache()); otherwise
 * flush the kernel alias now and, when the user address has a
 * different cache colour, the user-coloured alias as well.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	mapping = page_mapping_file(page);
	if (mapping && !mapping_mapped(mapping))
		/* Not mapped into userspace: flush lazily later. */
		set_bit(PG_dcache_dirty, &page->flags);
	else {
		unsigned long kaddr, flags;

		kaddr = (unsigned long)page_address(page);
		local_irq_save(flags);
		cpu_dcache_wbinval_page(kaddr);
		if (mapping) {
			unsigned long vaddr, kto;

			/* Derive the user virtual offset from the
			 * page-cache index to pick the user's colour. */
			vaddr = page->index << PAGE_SHIFT;
			if (aliasing(vaddr, kaddr)) {
				kto = kremap0(vaddr, page_to_phys(page));
				cpu_dcache_wbinval_page(kto);
				kunmap01(kto);
			}
		}
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) unsigned long vaddr, void *dst, void *src, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) unsigned long line_size, start, end, vto, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) vto = kremap0(vaddr, page_to_phys(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) memcpy(dst, src, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) if (vma->vm_flags & VM_EXEC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) line_size = L1_cache_info[DCACHE].line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) start = (unsigned long)dst & ~(line_size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) end =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) ((unsigned long)dst + len + line_size - 1) & ~(line_size -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) cpu_cache_wbinval_range(start, end, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) kunmap01(vto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) unsigned long vaddr, void *dst, void *src, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) unsigned long vto, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) vto = kremap0(vaddr, page_to_phys(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) memcpy(dst, src, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) kunmap01(vto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) void flush_anon_page(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) struct page *page, unsigned long vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) unsigned long kaddr, flags, ktmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) if (!PageAnon(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) if (vma->vm_mm != current->active_mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) if (vma->vm_flags & VM_EXEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) cpu_icache_inval_page(vaddr & PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) kaddr = (unsigned long)page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) if (aliasing(vaddr, kaddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) ktmp = kremap0(vaddr, page_to_phys(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) cpu_dcache_wbinval_page(ktmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) kunmap01(ktmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)
/* Write back and invalidate the D-cache for a kernel-mapped page. */
void flush_kernel_dcache_page(struct page *page)
{
	unsigned long flags;
	unsigned long kaddr = (unsigned long)page_address(page);

	local_irq_save(flags);
	cpu_dcache_wbinval_page(kaddr);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329)
/* Write back the D-cache over a kernel vmap range before I/O reads it. */
void flush_kernel_vmap_range(void *addr, int size)
{
	unsigned long flags;
	unsigned long start = (unsigned long)addr;

	local_irq_save(flags);
	cpu_dcache_wb_range(start, start + size);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
/* Invalidate the D-cache over a kernel vmap range after I/O wrote it. */
void invalidate_kernel_vmap_range(void *addr, int size)
{
	unsigned long flags;
	unsigned long start = (unsigned long)addr;

	local_irq_save(flags);
	cpu_dcache_inval_range(start, start + size);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) #endif