Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

arch/sh/mm/cache-sh7705.c (blame view of commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300)
/*
 * arch/sh/mm/cache-sh7705.c
 *
 * Copyright (C) 1999, 2000  Niibe Yutaka
 * Copyright (C) 2004  Alex Song
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The 32KB cache on the SH7705 suffers from the same synonym problem
 * as SH4 CPUs.
 */
static inline void cache_wback_all(void)
{
	unsigned long ways, waysize, addrstart;

	ways = current_cpu_data.dcache.ways;
	waysize = current_cpu_data.dcache.sets;
	waysize <<= current_cpu_data.dcache.entry_shift;

	addrstart = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long addr;

		for (addr = addrstart;
		     addr < addrstart + waysize;
		     addr += current_cpu_data.dcache.linesz) {
			unsigned long data;
			int v = SH_CACHE_UPDATED | SH_CACHE_VALID;

			data = __raw_readl(addr);

			if ((data & v) == v)
				__raw_writel(data & ~v, addr);
		}

		addrstart += current_cpu_data.dcache.way_incr;
	} while (--ways);
}
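
/*
 * Illustrative sketch, not used by this file: the operand-cache address
 * array slot that the loop in cache_wback_all() above touches for a given
 * (way, set) pair.  It simply mirrors the loop arithmetic (waysize is
 * sets << entry_shift and the walk steps by linesz), so like that loop it
 * assumes entry_shift corresponds to the line size.  The helper name is
 * hypothetical.
 */
static inline unsigned long __maybe_unused
oc_addr_array_slot(unsigned long way, unsigned long set)
{
	return CACHE_OC_ADDRESS_ARRAY +
	       way * current_cpu_data.dcache.way_incr +
	       (set << current_cpu_data.dcache.entry_shift);
}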

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and from the a.out format
 * handling routines.
 */
static void sh7705_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;

	start = data->addr1;
	end = data->addr2;

	__flush_wback_region((void *)start, end - start);
}
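
/*
 * Minimal usage sketch; the wrapper name is hypothetical and nothing in this
 * file calls it.  A range request reaches sh7705_flush_icache_range() with
 * the start and end addresses packed into a struct flusher_data, which is
 * read back out above as addr1/addr2.
 */
static void __maybe_unused example_icache_range_request(unsigned long start,
							unsigned long end)
{
	struct flusher_data data = {
		.addr1 = start,	/* start of the range to write back */
		.addr2 = end,	/* end of the range (exclusive) */
	};

	sh7705_flush_icache_range(&data);
}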

/*
 * Write back & invalidate the D-cache of the page.
 */
static void __flush_dcache_page(unsigned long phys)
{
	unsigned long ways, waysize, addrstart;
	unsigned long flags;

	phys |= SH_CACHE_VALID;

	/*
	 * Here, phys is the physical address of the page. We check all the
	 * tags in the cache for those with the same page number as this page
	 * (by masking off the lowest 2 bits of the 19-bit tag; these bits are
	 * derived from the offset within the 4k page). Matching valid
	 * entries are invalidated.
	 *
	 * Since 2 bits of the cache index are derived from the virtual page
	 * number, knowing this would reduce the number of cache entries to be
	 * searched by a factor of 4. However this function exists to deal with
	 * potential cache aliasing, therefore the optimisation is probably not
	 * possible.
	 */
	local_irq_save(flags);
	jump_to_uncached();

	ways = current_cpu_data.dcache.ways;
	waysize = current_cpu_data.dcache.sets;
	waysize <<= current_cpu_data.dcache.entry_shift;

	addrstart = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long addr;

		for (addr = addrstart;
		     addr < addrstart + waysize;
		     addr += current_cpu_data.dcache.linesz) {
			unsigned long data;

			data = __raw_readl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
			if (data == phys) {
				data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED);
				__raw_writel(data, addr);
			}
		}

		addrstart += current_cpu_data.dcache.way_incr;
	} while (--ways);

	back_to_cached();
	local_irq_restore(flags);
}
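
/*
 * Illustrative predicate, not used here: the tag comparison that the loop in
 * __flush_dcache_page() performs inline.  "tag" is a raw read from the
 * address array and "phys" is the page-aligned physical address of the page;
 * the valid bit is OR'd in here, just as __flush_dcache_page() does at its
 * top, so only valid lines belonging to that page compare equal.
 */
static inline int __maybe_unused oc_line_matches_page(unsigned long tag,
						      unsigned long phys)
{
	return (tag & (0x1ffffC00 | SH_CACHE_VALID)) ==
	       (phys | SH_CACHE_VALID);
}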

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh7705_flush_dcache_page(void *arg)
{
	struct page *page = arg;
	struct address_space *mapping = page_mapping_file(page);

	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else
		__flush_dcache_page(__pa(page_address(page)));
}
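
/*
 * Hypothetical helper mirroring the test above, for illustration only: the
 * flush can be deferred (by clearing PG_dcache_clean) when the page belongs
 * to a file mapping that is not currently mapped into any user address
 * space; otherwise the D-cache is written back and invalidated immediately.
 */
static inline bool __maybe_unused sh7705_dcache_flush_deferrable(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);

	return mapping && !mapping_mapped(mapping);
}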

static void sh7705_flush_cache_all(void *args)
{
	unsigned long flags;

	local_irq_save(flags);
	jump_to_uncached();

	cache_wback_all();
	back_to_cached();
	local_irq_restore(flags);
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDRESS: Virtual Address (U0 address)
 */
static void sh7705_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	unsigned long pfn = data->addr2;

	__flush_dcache_page(pfn << PAGE_SHIFT);
}
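
/*
 * Usage sketch (hypothetical caller, not part of this file): the cache-page
 * hook receives the target pfn in addr2 and converts it back to a physical
 * address with pfn << PAGE_SHIFT; addr1 conventionally carries the user
 * virtual address, which this particular flusher does not need.
 */
static void __maybe_unused example_cache_page_request(unsigned long vaddr,
						      unsigned long pfn)
{
	struct flusher_data data = {
		.addr1 = vaddr,	/* unused by sh7705_flush_cache_page() */
		.addr2 = pfn,	/* page frame number of the target page */
	};

	sh7705_flush_cache_page(&data);
}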

/*
 * This is called when a page-cache page is about to be mapped into a
 * user process' address space.  It offers an opportunity for a
 * port to ensure d-cache/i-cache coherency if necessary.
 *
 * Not entirely sure why this is necessary on SH3 with 32K cache but
 * without it we get occasional "Memory fault" when loading a program.
 */
static void sh7705_flush_icache_page(void *page)
{
	__flush_purge_region(page_address(page), PAGE_SIZE);
}

void __init sh7705_cache_init(void)
{
	local_flush_icache_range	= sh7705_flush_icache_range;
	local_flush_dcache_page		= sh7705_flush_dcache_page;
	local_flush_cache_all		= sh7705_flush_cache_all;
	local_flush_cache_mm		= sh7705_flush_cache_all;
	local_flush_cache_dup_mm	= sh7705_flush_cache_all;
	local_flush_cache_range		= sh7705_flush_cache_all;
	local_flush_cache_page		= sh7705_flush_cache_page;
	local_flush_icache_page		= sh7705_flush_icache_page;
}
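
/*
 * Sketch of how the hooks installed above are consumed (hypothetical wrapper;
 * the real call sites live in the generic SH cache code and also take care of
 * invoking the hook on each CPU, which is omitted here): a request is packed
 * into a struct flusher_data and the installed local_flush_* pointer is
 * called with it.
 */
static void __maybe_unused example_flush_through_hooks(unsigned long start,
						       unsigned long end)
{
	struct flusher_data data = {
		.addr1 = start,
		.addr2 = end,
	};

	local_flush_icache_range(&data);	/* -> sh7705_flush_icache_range() */
	local_flush_cache_all(NULL);		/* -> sh7705_flush_cache_all() */
}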