Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

arch/sh/mm/cache.c (all lines last modified in commit 8f3ce5b39 by kx, 2023-10-28 12:00:06 +0300):

// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2010  Paul Mundt
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

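/*
 * Per-CPU cache maintenance hooks. They all start out as cache_noop and
 * are repointed at the real implementations by the family-specific
 * *_cache_init() routines invoked from cpu_cache_init() below.
 */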
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

static inline void noop__flush_region(void *start, int size)
{
}

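/*
 * Run a cache operation on every online CPU. Preemption is disabled so
 * the local call and any IPIs target a stable set of CPUs; only SHX3
 * parts actually need the cross-core IPI (see below).
 */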
static inline void cacheop_on_each_cpu(void (*func)(void *info), void *info,
				       int wait)
{
	preempt_disable();

	/* Needing IPI for cross-core flush is SHX3-specific. */
#ifdef CONFIG_CPU_SHX3
	/*
	 * It's possible that this gets called early on when IRQs are
	 * still disabled due to ioremapping by the boot CPU, so don't
	 * even attempt IPIs unless there are other CPUs online.
	 */
	if (num_online_cpus() > 1)
		smp_call_function(func, info, wait);
#endif

	func(info);

	preempt_enable();
}

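/*
 * On CPUs with D-cache aliasing, accessing a user page through an
 * arbitrary kernel virtual address can leave stale lines in the user's
 * view of the page. If the page is mapped and still clean
 * (PG_dcache_clean set), kmap_coherent() provides a kernel mapping with
 * the same cache colour as the user address, so the copy needs no
 * flushing. Otherwise the copy goes through the regular mapping and
 * PG_dcache_clean is cleared to record that the kernel alias is dirty.
 */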
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}
}

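/*
 * Aliasing-aware page copy for the highmem interface: the destination
 * is written through kmap_atomic(), then purged whenever the kernel
 * alias can conflict with the user address, or when the VMA is
 * executable, so the I-cache never sees stale data.
 */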
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);

	if (boot_cpu_data.dcache.n_aliases && page_mapcount(from) &&
	    test_bit(PG_dcache_clean, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
	    (vma->vm_flags & VM_EXEC))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL(clear_user_highpage);

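/*
 * Called via the update_mmu_cache() path when a PTE is installed. This
 * is where lazily-deferred D-cache writeback happens: if the page was
 * still marked dirty (PG_dcache_clean unset), purge its kernel mapping
 * before userspace can see it through the new translation.
 */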
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn)) {
		int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
		if (dirty)
			__flush_purge_region(page_address(page), PAGE_SIZE);
	}
}

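/*
 * Flush an anonymous page before the kernel accesses it through its
 * linear mapping (e.g. for get_user_pages()), but only when the kernel
 * and user addresses actually land in different cache colours.
 */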
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
		    test_bit(PG_dcache_clean, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}

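/*
 * The generic flush_*() entry points below fan out to every CPU via
 * cacheop_on_each_cpu(). Operations that take more than one argument
 * are packed into a struct flusher_data, since the per-CPU hooks only
 * receive a single void pointer.
 */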
void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_page(struct page *page)
{
	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

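/*
 * Derive the alias (cache colour) parameters for one cache: the way
 * size is sets << entry_shift, and any bits of that size above
 * PAGE_SIZE select the colour. Worked example for a typical SH-4
 * D-cache with 512 sets, 32-byte lines (entry_shift = 5) and 4 KiB
 * pages:
 *
 *   alias_mask = ((512 - 1) << 5) & ~(4096 - 1)
 *              = 0x3fe0 & ~0xfff = 0x3000
 *   n_aliases  = (0x3000 >> 12) + 1 = 4 page colours
 */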
static void compute_alias(struct cache_info *c)
{
#ifdef CONFIG_MMU
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
#else
	c->alias_mask = 0;
#endif
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}

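/*
 * Boot-time cache setup: check CCR in case the caches were left
 * disabled, compute the alias parameters for each cache, install the
 * no-op flush regions, then hand over to the family-specific init.
 * The family hooks are declared __weak so this common code links even
 * when a given family's cache code isn't built; the type/family checks
 * ensure only the compiled-in one is ever called.
 */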
void __init cpu_cache_init(void)
{
	unsigned int cache_disabled = 0;

#ifdef SH_CCR
	cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
#endif

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

	if (boot_cpu_data.type == CPU_J2) {
		extern void __weak j2_cache_init(void);

		j2_cache_init();
	} else if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();

		if ((boot_cpu_data.type == CPU_SH7786) ||
		    (boot_cpu_data.type == CPU_SHX3)) {
			extern void __weak shx3_cache_init(void);

			shx3_cache_init();
		}
	}

skip:
	emit_cache_params();
}