Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

arch/arm/mm/flush.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"

#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);

void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	if (outer_cache.sync)
		outer_cache.sync();
#endif
	if (soc_mb)
		soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif
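
The soc_mb hook above lets platform code chain an extra SoC-level barrier into arm_heavy_mb(), which the heavy mb() path falls back to when CONFIG_ARM_HEAVY_MB is enabled. A minimal sketch of how board or bus code might install such a hook follows; my_soc_barrier() and my_soc_barrier_init() are hypothetical names, not functions from this tree.

/*
 * Illustrative sketch only -- not part of flush.c.  The extern matches the
 * soc_mb definition above; the real declaration lives in the arch headers.
 */
#include <linux/init.h>

extern void (*soc_mb)(void);

static void my_soc_barrier(void)
{
	/* e.g. read back a bus register so posted writes are pushed out */
}

static int __init my_soc_barrier_init(void)
{
	soc_mb = my_soc_barrier;	/* arm_heavy_mb() will now call this */
	return 0;
}
early_initcall(my_soc_barrier_init);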

#ifdef CONFIG_CPU_CACHE_VIPT

static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	/* Clean+invalidate the D-cache over the aliased page, then DSB. */
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
	    : "cc");
}

static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

#define FLAG_PA_IS_EXEC 1
#define FLAG_PA_CORE_IN_MM 2

static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
			   unsigned long len, unsigned int flags)
{
	if (cache_is_vivt()) {
		if (flags & FLAG_PA_CORE_IN_MM) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (flags & FLAG_PA_IS_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	unsigned int flags = 0;
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		flags |= FLAG_PA_CORE_IN_MM;
	if (vma->vm_flags & VM_EXEC)
		flags |= FLAG_PA_IS_EXEC;
	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len)
{
	unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}
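
copy_to_user_page() is the arch hook behind remote-memory writes such as ptrace pokes: the generic caller maps the target page, lets this helper do the memcpy() plus the flush_ptrace_access() maintenance above, and then dirties the page. A simplified sketch of that calling pattern follows; the wrapper name is hypothetical and error handling is omitted.

/* Illustrative caller sketch, loosely following access_process_vm()-style
 * users; not part of flush.c. */
static void poke_remote_page(struct vm_area_struct *vma, struct page *page,
			     unsigned long uaddr, const void *buf,
			     unsigned long len)
{
	unsigned long offset = uaddr & (PAGE_SIZE - 1);
	void *maddr = kmap(page);		/* kernel alias of the target page */

	copy_to_user_page(vma, page, uaddr, maddr + offset, buf, len);
	set_page_dirty_lock(page);
	kunmap(page);
}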

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		__cpuc_flush_dcache_area(page_address(page), page_size(page));
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < compound_nr(page); i++) {
				void *addr = kmap_atomic(page + i);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_atomic(addr);
			}
		} else {
			for (i = 0; i < compound_nr(page); i++) {
				void *addr = kmap_high_get(page + i);
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(page + i);
				}
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping_file(page);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
		if (test_bit(PG_dcache_clean, &page->flags))
			clear_bit(PG_dcache_clean, &page->flags);
		return;
	}

	mapping = page_mapping_file(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !page_mapcount(page))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
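
For the lazy path above to work, code that writes page-cache pages through the kernel mapping has to call flush_dcache_page() once it is done, so that a later user mapping (or __sync_icache_dcache() at fault time) sees consistent data. A minimal, hypothetical example of that convention follows; the function name is illustrative only.

/* Illustrative sketch only -- not part of flush.c. */
static void fill_pagecache_page(struct page *page, const void *src, size_t len)
{
	void *dst = kmap_atomic(page);

	memcpy(dst, src, len);
	kunmap_atomic(dst);
	flush_dcache_page(page);	/* flushed now or deferred, as above */
}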

/*
 * Ensure cache coherency for the kernel mapping of this page. We can
 * assume that the page is pinned via kmap.
 *
 * If the page only exists in the page cache and there are no user
 * space mappings, this is a no-op since the page was already marked
 * dirty at creation.  Otherwise, we need to flush the dirty kernel
 * cache lines directly.
 */
void flush_kernel_dcache_page(struct page *page)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
		struct address_space *mapping;

		mapping = page_mapping_file(page);

		if (!mapping || mapping_mapped(mapping)) {
			void *addr;

			addr = page_address(page);
			/*
			 * kmap_atomic() doesn't set the page virtual
			 * address for highmem pages, and
			 * kunmap_atomic() takes care of cache
			 * flushing already.
			 */
			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		}
	}
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
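
The expected sequence described in the comment above maps onto code roughly as follows. The wrapper name is hypothetical; flush_anon_page() is the generic helper from <linux/highmem.h> that dispatches to __flush_anon_page() on ARM.

/* Illustrative get_user_pages() consumer -- not part of flush.c. */
static void read_gup_page(struct vm_area_struct *vma, struct page *page,
			  unsigned long vmaddr, void *buf, size_t len)
{
	void *va;

	flush_anon_page(vma, page, vmaddr);	/* flush before touching the data */
	va = kmap_atomic(page);
	memcpy(buf, va, len);			/* memcpy() from the page */
	kunmap_atomic(va);
	/* if we had written to the page: flush_dcache_page(page); */
}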