Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

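Every line of the file below is attributed to a single commit, 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300); it appears to be arch/nios2/mm/cacheflush.c, the Nios II cache-maintenance code carried in this tree.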
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009, Wind River Systems Inc
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>

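/*
 * Write back and invalidate the data cache over [start, end), one
 * cache line at a time, using the Nios II "flushd" instruction.  The
 * range is first aligned to cache-line boundaries and clamped to the
 * total dcache size, so an oversized range costs no more than a full
 * cache flush.
 */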
static void __flush_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	if (end > start + cpuinfo.dcache_size)
		end = start + cpuinfo.dcache_size;

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("   flushd 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

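/*
 * Drop the data cache lines covering [start, end) without writing
 * dirty data back, using the Nios II "initda" instruction.  Intended
 * for ranges whose cached contents are known to be stale (e.g. buffers
 * about to be overwritten by incoming DMA).
 */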
static void __invalidate_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("   initda 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

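/*
 * Invalidate the instruction cache over [start, end) with the Nios II
 * "flushi" instruction (the icache holds no dirty data, so there is
 * nothing to write back), then issue "flushp" to flush the pipeline so
 * no stale instructions remain in flight.
 */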
static void __flush_icache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.icache_line_size - 1);
	end += (cpuinfo.icache_line_size - 1);
	end &= ~(cpuinfo.icache_line_size - 1);

	if (end > start + cpuinfo.icache_size)
		end = start + cpuinfo.icache_size;

	for (addr = start; addr < end; addr += cpuinfo.icache_line_size) {
		__asm__ __volatile__ ("   flushi %0\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
	__asm__ __volatile(" flushp\n");
}

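/*
 * Walk all shared, file-backed mappings of this page in the current mm
 * and flush the corresponding user-space addresses, so cache aliases of
 * the page stay coherent with the kernel mapping.
 */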
static void flush_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;

		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset,
			page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

void flush_cache_all(void)
{
	__flush_dcache(0, cpuinfo.dcache_size);
	__flush_icache(0, cpuinfo.icache_size);
}

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}
EXPORT_SYMBOL(flush_dcache_range);

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	__invalidate_dcache(start, end);
}
EXPORT_SYMBOL(invalidate_dcache_range);

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_dcache(start, end);
	if (vma == NULL || (vma->vm_flags & VM_EXEC))
		__flush_icache(start, end);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + PAGE_SIZE;

	__flush_dcache(start, end);
	__flush_icache(start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
			unsigned long pfn)
{
	unsigned long start = vmaddr;
	unsigned long end = start + PAGE_SIZE;

	__flush_dcache(start, end);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache(start, end);
}

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	unsigned long start = (unsigned long)page_address(page);

	__flush_dcache(start, start + PAGE_SIZE);
}

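/*
 * Keep a page-cache page coherent between its kernel mapping and any
 * user-space mappings.  For a file page that is not currently mapped
 * into user space the work is deferred by clearing PG_dcache_clean;
 * otherwise the kernel mapping is written back, user-space aliases are
 * flushed, and the page is marked clean.
 */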
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping_file(page);

	/* Flush this page if there are aliases. */
	if (mapping && !mapping_mapped(mapping)) {
		clear_bit(PG_dcache_clean, &page->flags);
	} else {
		__flush_dcache_page(mapping, page);
		if (mapping) {
			unsigned long start = (unsigned long)page_address(page);
			flush_aliases(mapping, page);
			flush_icache_range(start, start + PAGE_SIZE);
		}
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

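/*
 * Called after a PTE has been installed for the faulting address:
 * reload the TLB entry and, for a valid, non-zero page, bring the data
 * cache (and the instruction cache for executable mappings) coherent
 * with the new user-space mapping.
 */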
void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	unsigned long pfn = pte_pfn(pte);
	struct page *page;
	struct address_space *mapping;

	reload_tlb_page(vma, address, pte);

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping_file(page);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (mapping) {
		flush_aliases(mapping, page);
		if (vma->vm_flags & VM_EXEC)
			flush_icache_page(vma, page);
	}
}

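/*
 * copy_user_page()/clear_user_page() operate on a page that is mapped
 * into user space at vaddr: the user-space address range is flushed
 * before the copy or clear, and the kernel mapping that was written is
 * flushed afterwards, so neither view is left stale.
 */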
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *to)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	__flush_icache(vaddr, vaddr + PAGE_SIZE);
	copy_page(vto, vfrom);
	__flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
	__flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	__flush_icache(vaddr, vaddr + PAGE_SIZE);
	clear_page(addr);
	__flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
	__flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
}

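/*
 * copy_from_user_page()/copy_to_user_page() let the kernel read from or
 * write into a page that also has a user-space mapping (e.g. for
 * ptrace): the user alias is flushed first, the data is copied through
 * the kernel mapping, and the page's kernel mapping over the copied
 * range is then flushed (plus the icache for executable mappings).
 */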
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			unsigned long user_vaddr,
			void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)src, (unsigned long)src + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)src, (unsigned long)src + len);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
			unsigned long user_vaddr,
			void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)dst, (unsigned long)dst + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)dst, (unsigned long)dst + len);
}