Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2009  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <asm/cache_insns.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_ICACHE_PAGES	32

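/*
 * Worked example: assuming the common 4 KiB PAGE_SIZE, the cut-off works
 * out to 32 * 4 KiB = 128 KiB; sh4_flush_icache_range() below falls back
 * to local_flush_cache_all() for anything larger.
 */
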
static void __flush_cache_one(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module, the a.out format routines,
 * signal handler code, and kprobes code.
 */
static void sh4_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;
	unsigned long flags, v;
	int i;

	start = data->addr1;
	end = data->addr2;

	/* If there are too many pages then just blow away the caches */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		local_flush_cache_all(NULL);
		return;
	}

	/*
	 * Selectively flush d-cache then invalidate the i-cache.
	 * This is inefficient, so only use this for small ranges.
	 */
	start &= ~(L1_CACHE_BYTES-1);
	end += L1_CACHE_BYTES-1;
	end &= ~(L1_CACHE_BYTES-1);

	local_irq_save(flags);
	jump_to_uncached();

	for (v = start; v < end; v += L1_CACHE_BYTES) {
		unsigned long icacheaddr;
		int j, n;

		__ocbwb(v);

		icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v &
				cpu_data->icache.entry_mask);

		/* Clear i-cache line valid-bit */
		n = boot_cpu_data.icache.n_aliases;
		for (i = 0; i < cpu_data->icache.ways; i++) {
			for (j = 0; j < n; j++)
				__raw_writel(0, icacheaddr + (j * PAGE_SIZE));
			icacheaddr += cpu_data->icache.way_incr;
		}
	}

	back_to_cached();
	local_irq_restore(flags);
}
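
#if 0
/*
 * Illustrative sketch, not compiled: how the loop above derives the
 * I-cache address-array slots for one virtual address.  The geometry
 * constants here are hypothetical stand-ins for whatever the probed
 * cpu_data reports on real hardware.
 */
static unsigned long ic_array_slot(unsigned long v, int way, int alias)
{
	const unsigned long entry_mask = 0x1fe0;	/* hypothetical */
	const unsigned long way_incr   = 0x2000;	/* hypothetical */

	return CACHE_IC_ADDRESS_ARRAY
		| (v & entry_mask)	/* entry selected by the vaddr */
		| (way * way_incr)	/* step to the next way */
		| (alias * PAGE_SIZE);	/* cover every alias colour */
}
#endif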

static inline void flush_cache_one(unsigned long start, unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be uncached to operate on the I-cache.
	 * Some types of SH-4 require PC to be uncached to operate on the D-cache.
	 */
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = cached_to_uncached;

	local_irq_save(flags);
	__flush_cache_one(start, phys, exec_offset);
	local_irq_restore(flags);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh4_flush_dcache_page(void *arg)
{
	struct page *page = arg;
	unsigned long addr = (unsigned long)page_address(page);
#ifndef CONFIG_SMP
	struct address_space *mapping = page_mapping_file(page);

	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else
#endif
		flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
				(addr & shm_align_mask), page_to_phys(page));

	wmb();
}
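
/*
 * The UP-only branch above defers the work: for a file page that nobody
 * currently has mapped, it just clears PG_dcache_clean and lets a later
 * fault path (__update_cache() in arch/sh/mm/cache.c) write the lines
 * back when the page is next mapped.
 */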

/* TODO: Selective icache invalidation through IC address array... */
static void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_uncached();

	/* Flush I-cache */
	ccr = __raw_readl(SH_CCR);
	ccr |= CCR_CACHE_ICI;
	__raw_writel(ccr, SH_CCR);

	/*
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_cached();
	local_irq_restore(flags);
}
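
/*
 * Why the jump_to_uncached()/back_to_cached() bracket above: the CCR
 * update must not execute through the very cache it is invalidating, so
 * the store runs from the uncached P2 alias, as the SH-4 hardware manuals
 * require for CCR accesses.
 */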

static void flush_dcache_all(void)
{
	unsigned long addr, end_addr, entry_offset;

	end_addr = CACHE_OC_ADDRESS_ARRAY +
		(current_cpu_data.dcache.sets <<
		 current_cpu_data.dcache.entry_shift) *
			current_cpu_data.dcache.ways;

	entry_offset = 1 << current_cpu_data.dcache.entry_shift;

	for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) {
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
		__raw_writel(0, addr); addr += entry_offset;
	}
}
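
#if 0
/*
 * Compact equivalent of the hand-unrolled loop above, for illustration
 * only (the 8x unrolling in the real code keeps the write stream tight).
 * Writing 0 to an OC address-array entry clears its V and U bits, which
 * writes the line back if it was dirty and leaves it invalid.
 */
for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; addr += entry_offset)
	__raw_writel(0, addr);
#endif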

static void sh4_flush_cache_all(void *unused)
{
	flush_dcache_all();
	flush_icache_all();
}

/*
 * Note: (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.
 *
 * Caller takes mm->mmap_lock.
 */
static void sh4_flush_cache_mm(void *arg)
{
	struct mm_struct *mm = arg;

	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
		return;

	flush_dcache_all();
}
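
/*
 * Worked example of the aliasing above: assuming a 16 KiB direct-mapped
 * way and 4 KiB pages, virtual address bits 12-13 select the cache set
 * but are not covered by the physical tag, so one physical page can sit
 * in 16K / 4K = 4 distinct cache colours (n_aliases == 4).
 */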

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
static void sh4_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	struct page *page;
	unsigned long address, pfn, phys;
	int map_coherent = 0;
	pmd_t *pmd;
	pte_t *pte;
	void *vaddr;

	vma = data->vma;
	address = data->addr1 & PAGE_MASK;
	pfn = data->addr2;
	phys = pfn << PAGE_SHIFT;
	page = pfn_to_page(pfn);

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	pmd = pmd_off(vma->vm_mm, address);
	pte = pte_offset_kernel(pmd, address);

	/* If the page isn't present, there is nothing to do here. */
	if (!(pte_val(*pte) & _PAGE_PRESENT))
		return;

	if (vma->vm_mm == current->active_mm)
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (current_cpu_data.dcache.n_aliases &&
			test_bit(PG_dcache_clean, &page->flags) &&
			page_mapcount(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, address);
		else
			vaddr = kmap_atomic(page);

		address = (unsigned long)vaddr;
	}

	flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
			(address & shm_align_mask), phys);

	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent(vaddr);
		else
			kunmap_atomic(vaddr);
	}
}
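
/*
 * Note the colour selection above: OR-ing (address & shm_align_mask) into
 * the OC address-array base makes the write-back hit the alias the user
 * mapping actually used, rather than the kernel's own mapping of the page.
 */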

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
static void sh4_flush_cache_range(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long start, end;

	vma = data->vma;
	start = data->addr1;
	end = data->addr2;

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	flush_dcache_all();

	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();
}

/**
 * __flush_cache_one
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_one(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}
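
/*
 * Illustrative walk-through with hypothetical geometry: flushing one page
 * of colour 1 in a 2-way cache with way_incr == 0x4000 and 32-byte lines:
 *
 *	base_addr = CACHE_OC_ADDRESS_ARRAY | 0x1000
 *	way 0: stores 'p' at base_addr + 0x00, +0x20, ..., +0xfe0
 *	way 1: the same pattern starting at base_addr + 0x4000
 *
 * Each store into the address array rewrites that entry's tag and V/U
 * bits, writing the old line back first if it was dirty; each iteration
 * of the inner loop covers two lines via the 'a' and 'a+32' stores.
 */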

extern void __weak sh4__flush_region_init(void);

/*
 * SH-4 has a virtually indexed and physically tagged cache.
 */
void __init sh4_cache_init(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		__raw_readl(CCN_PVR),
		__raw_readl(CCN_CVR),
		__raw_readl(CCN_PRR));

	local_flush_icache_range	= sh4_flush_icache_range;
	local_flush_dcache_page		= sh4_flush_dcache_page;
	local_flush_cache_all		= sh4_flush_cache_all;
	local_flush_cache_mm		= sh4_flush_cache_mm;
	local_flush_cache_dup_mm	= sh4_flush_cache_mm;
	local_flush_cache_page		= sh4_flush_cache_page;
	local_flush_cache_range		= sh4_flush_cache_range;

	sh4__flush_region_init();
}
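
#if 0
/*
 * Sketch of how the hooks installed above are reached, simplified from
 * arch/sh/mm/cacheflush.c (the flusher_data layout matches the one the
 * handlers in this file unpack):
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}
#endif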