^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * This file is subject to the terms and conditions of the GNU General Public
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * License. See the file "COPYING" in the main directory of this archive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/cpu_pm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/hardirq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/linkage.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/preempt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <asm/bcache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <asm/bootinfo.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <asm/cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <asm/cacheops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <asm/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <asm/cpu-features.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <asm/cpu-type.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <asm/r4kcache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <asm/sections.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <asm/mmu_context.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <asm/war.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <asm/cacheflush.h> /* for run_uncached() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <asm/traps.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <asm/dma-coherence.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <asm/mips-cps.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * Bits describing what cache ops an SMP callback function may perform.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * R4K_HIT - Virtual user or kernel address based cache operations. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * active_mm must be checked before using user addresses, falling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * back to kmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * R4K_INDEX - Index based cache operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define R4K_HIT BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define R4K_INDEX BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
/**
 * r4k_op_needs_ipi() - Decide if a cache op needs to be done on every core.
 * @type: Type of cache operations (R4K_HIT or R4K_INDEX).
 *
 * Decides whether a cache op needs to be performed on every core in the system.
 * This may change depending on the @type of cache operation, as well as the set
 * of online CPUs, so preemption should be disabled by the caller to prevent CPU
 * hotplug from changing the result.
 *
 * Returns: 1 if the cache operation @type should be done on every core in
 *	    the system.
 *	    0 if the cache operation @type is globalized and only needs to
 *	    be performed on a single CPU.
 */
static inline bool r4k_op_needs_ipi(unsigned int type)
{
	/* The MIPS Coherence Manager (CM) globalizes address-based cache ops */
	if (type == R4K_HIT && mips_cm_present())
		return false;

	/*
	 * Hardware doesn't globalize the required cache ops, so SMP calls may
	 * be needed, but only if there are foreign CPUs (non-siblings with
	 * separate caches).
	 */
	/* cpu_foreign_map[] undeclared when !CONFIG_SMP */
#ifdef CONFIG_SMP
	return !cpumask_empty(&cpu_foreign_map[0]);
#else
	return false;
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 * o No return value
 * o collapses to normal function call on UP kernels
 * o collapses to normal function call on systems with a single shared
 *   primary cache.
 * o doesn't disable interrupts on the local CPU
 */
static inline void r4k_on_each_cpu(unsigned int type,
				   void (*func)(void *info), void *info)
{
	/*
	 * Preemption stays disabled across both the r4k_op_needs_ipi()
	 * decision and the local invocation of @func, so the CPU cannot
	 * migrate between choosing the target mask and running the op.
	 */
	preempt_disable();
	if (r4k_op_needs_ipi(type))
		/* wait == 1: remote calls complete before we proceed */
		smp_call_function_many(&cpu_foreign_map[smp_processor_id()],
				       func, info, 1);
	/* @func always runs on the local CPU as well */
	func(info);
	preempt_enable();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)
/*
 * Must die.
 */
/* Cache sizes in bytes, probed once at boot; read-mostly afterwards. */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long vcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without boardcaches
 */
static void cache_noop(void) {}

/* All-no-op bcache_ops; cast through (void *) since cache_noop takes no args. */
static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

/*
 * Active board-cache ops, defaulting to the no-ops above.
 * NOTE(review): presumably repointed during platform setup on boards with an
 * external cache — verify against the board-cache probe code.
 */
struct bcache_ops *bcops = &no_sc_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
/* R4600 major-revision checks: compare the PRId with the minor rev masked. */
#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

/*
 * Preamble for hit-type cache ops on R4600 parts:
 * - v2.x: perform an uncached (CKSEG1) load first.
 * - v1.x: pad with four nops.
 * Both branches compile away unless the matching CONFIG_WAR_* is enabled.
 */
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) &&		\
	    cpu_is_r4600_v2_x())					\
		*(volatile unsigned long *)CKSEG1;			\
	if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP))		\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
/* Selected at setup time according to the D-cache line size. */
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	/* 32-byte-line variant needs the R4600 hit-cacheop preamble. */
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
/* 64-byte-line variant; no workaround preamble required. */
static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
{
	blast_dcache64_page(addr);
}

/* 128-byte-line variant; no workaround preamble required. */
static inline void r4k_blast_dcache_page_dc128(unsigned long addr)
{
	blast_dcache128_page(addr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) static void r4k_blast_dcache_page_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) unsigned long dc_lsize = cpu_dcache_line_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) switch (dc_lsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) r4k_blast_dcache_page = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) r4k_blast_dcache_page = blast_dcache16_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) case 64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) case 128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) r4k_blast_dcache_page = r4k_blast_dcache_page_dc128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
#ifndef CONFIG_EVA
/* Without EVA, user pages are flushed via the ordinary page blaster. */
#define r4k_blast_dcache_user_page r4k_blast_dcache_page
#else

/* EVA: dedicated blaster using the *_user_page cache ops. */
static void (*r4k_blast_dcache_user_page)(unsigned long addr);

static void r4k_blast_dcache_user_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_user_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_user_page = blast_dcache16_user_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_user_page = blast_dcache32_user_page;
	else if (dc_lsize == 64)
		r4k_blast_dcache_user_page = blast_dcache64_user_page;
	/*
	 * NOTE(review): unlike the kernel-page setup there is no 128-byte
	 * case here — confirm no EVA-capable CPU has 128-byte D-cache lines.
	 */
}

#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) static void r4k_blast_dcache_page_indexed_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) unsigned long dc_lsize = cpu_dcache_line_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) if (dc_lsize == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) r4k_blast_dcache_page_indexed = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) else if (dc_lsize == 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) else if (dc_lsize == 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) else if (dc_lsize == 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) else if (dc_lsize == 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) void (* r4k_blast_dcache)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) EXPORT_SYMBOL(r4k_blast_dcache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) static void r4k_blast_dcache_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) unsigned long dc_lsize = cpu_dcache_line_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) if (dc_lsize == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) r4k_blast_dcache = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) else if (dc_lsize == 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) r4k_blast_dcache = blast_dcache16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) else if (dc_lsize == 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) r4k_blast_dcache = blast_dcache32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) else if (dc_lsize == 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) r4k_blast_dcache = blast_dcache64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) else if (dc_lsize == 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) r4k_blast_dcache = blast_dcache128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)
/* force code alignment (used for CONFIG_WAR_TX49XX_ICACHE_INDEX_INV) */
/*
 * Branch over an .align directive so the code that follows starts on a
 * 2^order byte boundary.
 */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
/*
 * R4600 v1.x workaround: run the full I-cache blast with interrupts off
 * (selected when CONFIG_WAR_R4600_V1_INDEX_ICACHEOP and cpu_is_r4600_v1_x()).
 */
static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
/*
 * TX49 workaround: index-invalidate the whole I-cache in two passes over
 * alternating 1 KiB chunks, with each pass's code aligned (via the
 * CACHE32_UNROLL32_ALIGN* macros) so it sits in the opposite chunk parity
 * from the lines it invalidates.
 */
static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk. blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache_unroll(32, kernel_cache, Index_Invalidate_I,
				     addr | ws, 32);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk. blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache_unroll(32, kernel_cache, Index_Invalidate_I,
				     addr | ws, 32);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
/*
 * R4600 v1.x workaround: index-invalidate one page of the I-cache with
 * interrupts disabled.
 */
static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
/*
 * TX49 workaround, page-indexed form: invalidate one page's worth of
 * I-cache indexes in two alternating-chunk passes, with each pass aligned
 * so its code lives in the opposite 1 KiB chunk parity (see
 * tx49_blast_icache32 above).
 */
static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk. blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache_unroll(32, kernel_cache, Index_Invalidate_I,
				     addr | ws, 32);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk. blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache_unroll(32, kernel_cache, Index_Invalidate_I,
				     addr | ws, 32);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) static void (* r4k_blast_icache_page)(unsigned long addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) static void r4k_blast_icache_page_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) unsigned long ic_lsize = cpu_icache_line_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) if (ic_lsize == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) r4k_blast_icache_page = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) else if (ic_lsize == 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) r4k_blast_icache_page = blast_icache16_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2EF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) r4k_blast_icache_page = loongson2_blast_icache32_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) else if (ic_lsize == 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) r4k_blast_icache_page = blast_icache32_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) else if (ic_lsize == 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) r4k_blast_icache_page = blast_icache64_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) else if (ic_lsize == 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) r4k_blast_icache_page = blast_icache128_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)
#ifndef CONFIG_EVA
/* Without EVA, user pages are flushed via the ordinary page blaster. */
#define r4k_blast_icache_user_page r4k_blast_icache_page
#else

/* EVA: dedicated blaster using the *_user_page cache ops. */
static void (*r4k_blast_icache_user_page)(unsigned long addr);

static void r4k_blast_icache_user_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_user_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_user_page = blast_icache16_user_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_user_page = blast_icache32_user_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_user_page = blast_icache64_user_page;
	/*
	 * NOTE(review): no 128-byte case here, unlike the kernel-page setup —
	 * confirm no EVA-capable CPU has 128-byte I-cache lines.
	 */
}

#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) static void r4k_blast_icache_page_indexed_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) unsigned long ic_lsize = cpu_icache_line_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) if (ic_lsize == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) r4k_blast_icache_page_indexed = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) else if (ic_lsize == 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) else if (ic_lsize == 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) if (IS_ENABLED(CONFIG_WAR_R4600_V1_INDEX_ICACHEOP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) cpu_is_r4600_v1_x())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) r4k_blast_icache_page_indexed =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) blast_icache32_r4600_v1_page_indexed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) else if (IS_ENABLED(CONFIG_WAR_TX49XX_ICACHE_INDEX_INV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) r4k_blast_icache_page_indexed =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) tx49_blast_icache32_page_indexed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) else if (current_cpu_type() == CPU_LOONGSON2EF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) r4k_blast_icache_page_indexed =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) loongson2_blast_icache32_page_indexed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) r4k_blast_icache_page_indexed =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) blast_icache32_page_indexed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) } else if (ic_lsize == 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) void (* r4k_blast_icache)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) EXPORT_SYMBOL(r4k_blast_icache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) static void r4k_blast_icache_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) unsigned long ic_lsize = cpu_icache_line_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) if (ic_lsize == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) r4k_blast_icache = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) else if (ic_lsize == 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) r4k_blast_icache = blast_icache16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) else if (ic_lsize == 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) if (IS_ENABLED(CONFIG_WAR_R4600_V1_INDEX_ICACHEOP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) cpu_is_r4600_v1_x())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) r4k_blast_icache = blast_r4600_v1_icache32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) else if (IS_ENABLED(CONFIG_WAR_TX49XX_ICACHE_INDEX_INV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) r4k_blast_icache = tx49_blast_icache32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) else if (current_cpu_type() == CPU_LOONGSON2EF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) r4k_blast_icache = loongson2_blast_icache32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) r4k_blast_icache = blast_icache32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) } else if (ic_lsize == 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) r4k_blast_icache = blast_icache64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) else if (ic_lsize == 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) r4k_blast_icache = blast_icache128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) static void (* r4k_blast_scache_page)(unsigned long addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) static void r4k_blast_scache_page_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) unsigned long sc_lsize = cpu_scache_line_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) if (scache_size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) r4k_blast_scache_page = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) else if (sc_lsize == 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) r4k_blast_scache_page = blast_scache16_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) else if (sc_lsize == 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) r4k_blast_scache_page = blast_scache32_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) else if (sc_lsize == 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) r4k_blast_scache_page = blast_scache64_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) else if (sc_lsize == 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) r4k_blast_scache_page = blast_scache128_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) static void r4k_blast_scache_page_indexed_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) unsigned long sc_lsize = cpu_scache_line_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) if (scache_size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) r4k_blast_scache_page_indexed = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) else if (sc_lsize == 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) else if (sc_lsize == 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) else if (sc_lsize == 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) else if (sc_lsize == 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) static void (* r4k_blast_scache)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) static void r4k_blast_scache_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) unsigned long sc_lsize = cpu_scache_line_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) if (scache_size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) r4k_blast_scache = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) else if (sc_lsize == 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) r4k_blast_scache = blast_scache16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) else if (sc_lsize == 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) r4k_blast_scache = blast_scache32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) else if (sc_lsize == 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) r4k_blast_scache = blast_scache64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) else if (sc_lsize == 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) r4k_blast_scache = blast_scache128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) static void (*r4k_blast_scache_node)(long node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) static void r4k_blast_scache_node_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) unsigned long sc_lsize = cpu_scache_line_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) if (current_cpu_type() != CPU_LOONGSON64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) r4k_blast_scache_node = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) else if (sc_lsize == 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) r4k_blast_scache_node = blast_scache16_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) else if (sc_lsize == 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) r4k_blast_scache_node = blast_scache32_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) else if (sc_lsize == 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) r4k_blast_scache_node = blast_scache64_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) else if (sc_lsize == 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) r4k_blast_scache_node = blast_scache128_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) static inline void local_r4k___flush_cache_all(void * args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) switch (current_cpu_type()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) case CPU_LOONGSON2EF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) case CPU_R4000SC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) case CPU_R4000MC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) case CPU_R4400SC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) case CPU_R4400MC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) case CPU_R10000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) case CPU_R12000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) case CPU_R14000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) case CPU_R16000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) * These caches are inclusive caches, that is, if something
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) * is not cached in the S-cache, we know it also won't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) * in one of the primary caches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) r4k_blast_scache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) case CPU_LOONGSON64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) /* Use get_ebase_cpunum() for both NUMA=y/n */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) r4k_blast_scache_node(get_ebase_cpunum() >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) case CPU_BMIPS5000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) r4k_blast_scache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) __sync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) r4k_blast_dcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) r4k_blast_icache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) static void r4k___flush_cache_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) r4k_on_each_cpu(R4K_INDEX, local_r4k___flush_cache_all, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) * has_valid_asid() - Determine if an mm already has an ASID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) * @mm: Memory map.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) * @type: R4K_HIT or R4K_INDEX, type of cache op.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) * Determines whether @mm already has an ASID on any of the CPUs which cache ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) * of type @type within an r4k_on_each_cpu() call will affect. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) * r4k_on_each_cpu() does an SMP call to a single VPE in each core, then the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) * scope of the operation is confined to sibling CPUs, otherwise all online CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) * will need to be checked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) * Must be called in non-preemptive context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) * Returns: 1 if the CPUs affected by @type cache ops have an ASID for @mm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) * 0 otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) const cpumask_t *mask = cpu_present_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) if (cpu_has_mmid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) return cpu_context(0, mm) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) /* cpu_sibling_map[] undeclared when !CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) * If r4k_on_each_cpu does SMP calls, it does them to a single VPE in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) * each foreign core, so we only need to worry about siblings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) * Otherwise we need to worry about all present CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) if (r4k_op_needs_ipi(type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) mask = &cpu_sibling_map[smp_processor_id()];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) for_each_cpu(i, mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) if (cpu_context(i, mm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) static void r4k__flush_cache_vmap(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) r4k_blast_dcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) static void r4k__flush_cache_vunmap(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) r4k_blast_dcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) * Note: flush_tlb_range() assumes flush_cache_range() sufficiently flushes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) * whole caches when vma is executable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) static inline void local_r4k_flush_cache_range(void * args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) struct vm_area_struct *vma = args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) int exec = vma->vm_flags & VM_EXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) if (!has_valid_asid(vma->vm_mm, R4K_INDEX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) * If dcache can alias, we must blast it since mapping is changing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) * If executable, we must ensure any dirty lines are written back far
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) * enough to be visible to icache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) r4k_blast_dcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) /* If executable, blast stale lines from icache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) if (exec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) r4k_blast_icache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) static void r4k_flush_cache_range(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) int exec = vma->vm_flags & VM_EXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) if (cpu_has_dc_aliases || exec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_range, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) static inline void local_r4k_flush_cache_mm(void * args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) struct mm_struct *mm = args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) if (!has_valid_asid(mm, R4K_INDEX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) * only flush the primary caches but R1x000 behave sane ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) * R4000SC and R4400SC indexed S-cache ops also invalidate primary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) * caches, so we can bail out early.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) if (current_cpu_type() == CPU_R4000SC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) current_cpu_type() == CPU_R4000MC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) current_cpu_type() == CPU_R4400SC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) current_cpu_type() == CPU_R4400MC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) r4k_blast_scache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) r4k_blast_dcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) static void r4k_flush_cache_mm(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) if (!cpu_has_dc_aliases)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_mm, mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) struct flush_cache_page_args {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) unsigned long pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) static inline void local_r4k_flush_cache_page(void *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) struct flush_cache_page_args *fcp_args = args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) struct vm_area_struct *vma = fcp_args->vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) unsigned long addr = fcp_args->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) struct page *page = pfn_to_page(fcp_args->pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) int exec = vma->vm_flags & VM_EXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) struct mm_struct *mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) int map_coherent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) pmd_t *pmdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) pte_t *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) void *vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) * If owns no valid ASID yet, cannot possibly have gotten
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) * this page into the cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) if (!has_valid_asid(mm, R4K_HIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) addr &= PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) pmdp = pmd_off(mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) ptep = pte_offset_kernel(pmdp, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) * If the page isn't marked valid, the page cannot possibly be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) * in the cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) if (!(pte_present(*ptep)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) vaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) * Use kmap_coherent or kmap_atomic to do flushes for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) * another ASID than the current one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) map_coherent = (cpu_has_dc_aliases &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) page_mapcount(page) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) !Page_dcache_dirty(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) if (map_coherent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) vaddr = kmap_coherent(page, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) vaddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) addr = (unsigned long)vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) vaddr ? r4k_blast_dcache_page(addr) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) r4k_blast_dcache_user_page(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) if (exec && !cpu_icache_snoops_remote_store)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) r4k_blast_scache_page(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) if (exec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) drop_mmu_context(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) vaddr ? r4k_blast_icache_page(addr) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) r4k_blast_icache_user_page(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) if (vaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) if (map_coherent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) kunmap_coherent();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) kunmap_atomic(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) static void r4k_flush_cache_page(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) unsigned long addr, unsigned long pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) struct flush_cache_page_args args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) args.vma = vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) args.addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) args.pfn = pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_page, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) static inline void local_r4k_flush_data_cache_page(void * addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) r4k_blast_dcache_page((unsigned long) addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) static void r4k_flush_data_cache_page(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) if (in_atomic())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) local_r4k_flush_data_cache_page((void *)addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) r4k_on_each_cpu(R4K_HIT, local_r4k_flush_data_cache_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) (void *) addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) struct flush_icache_range_args {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) unsigned long start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) unsigned long end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) unsigned int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) bool user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) static inline void __local_r4k_flush_icache_range(unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) unsigned int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) bool user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) if (!cpu_has_ic_fills_f_dc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (type == R4K_INDEX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) (type & R4K_INDEX && end - start >= dcache_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) r4k_blast_dcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) R4600_HIT_CACHEOP_WAR_IMPL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) protected_blast_dcache_range(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) blast_dcache_range(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (type == R4K_INDEX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) (type & R4K_INDEX && end - start > icache_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) r4k_blast_icache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) switch (boot_cpu_type()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) case CPU_LOONGSON2EF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) protected_loongson2_blast_icache_range(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) protected_blast_icache_range(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) blast_icache_range(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) static inline void local_r4k_flush_icache_range(unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) static inline void local_r4k_flush_icache_user_range(unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) static inline void local_r4k_flush_icache_range_ipi(void *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) struct flush_icache_range_args *fir_args = args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) unsigned long start = fir_args->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) unsigned long end = fir_args->end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) unsigned int type = fir_args->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) bool user = fir_args->user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) __local_r4k_flush_icache_range(start, end, type, user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) static void __r4k_flush_icache_range(unsigned long start, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) bool user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) struct flush_icache_range_args args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) unsigned long size, cache_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) args.start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) args.end = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) args.type = R4K_HIT | R4K_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) args.user = user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * Indexed cache ops require an SMP call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * Consider if that can or should be avoided.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (r4k_op_needs_ipi(R4K_INDEX) && !r4k_op_needs_ipi(R4K_HIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * If address-based cache ops don't require an SMP call, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * use them exclusively for small flushes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) size = end - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) cache_size = icache_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (!cpu_has_ic_fills_f_dc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) size *= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) cache_size += dcache_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (size <= cache_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) args.type &= ~R4K_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) r4k_on_each_cpu(args.type, local_r4k_flush_icache_range_ipi, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) instruction_hazard();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) static void r4k_flush_icache_range(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return __r4k_flush_icache_range(start, end, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return __r4k_flush_icache_range(start, end, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) #ifdef CONFIG_DMA_NONCOHERENT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) /* Catch bad driver code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (WARN_ON(size == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (cpu_has_inclusive_pcaches) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (size >= scache_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (current_cpu_type() != CPU_LOONGSON64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) r4k_blast_scache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) r4k_blast_scache_node(pa_to_nid(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) blast_scache_range(addr, addr + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) __sync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * Either no secondary cache or the available caches don't have the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * subset property so we have to flush the primary caches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * explicitly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * If we would need IPI to perform an INDEX-type operation, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * we have to use the HIT-type alternative as IPI cannot be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * here due to interrupts possibly being disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) r4k_blast_dcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) R4600_HIT_CACHEOP_WAR_IMPL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) blast_dcache_range(addr, addr + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) bc_wback_inv(addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) __sync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) static void prefetch_cache_inv(unsigned long addr, unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) unsigned int linesz = cpu_scache_line_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) unsigned long addr0 = addr, addr1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) addr0 &= ~(linesz - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) addr1 = (addr0 + size - 1) & ~(linesz - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) protected_writeback_scache_line(addr0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (likely(addr1 != addr0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) protected_writeback_scache_line(addr1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) addr0 += linesz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (likely(addr1 != addr0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) protected_writeback_scache_line(addr0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) addr1 -= linesz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (likely(addr1 > addr0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) protected_writeback_scache_line(addr0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) /* Catch bad driver code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (WARN_ON(size == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (current_cpu_type() == CPU_BMIPS5000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) prefetch_cache_inv(addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (cpu_has_inclusive_pcaches) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (size >= scache_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (current_cpu_type() != CPU_LOONGSON64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) r4k_blast_scache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) r4k_blast_scache_node(pa_to_nid(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * There is no clearly documented alignment requirement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * for the cache instruction on MIPS processors and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * some processors, among them the RM5200 and RM7000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * QED processors will throw an address error for cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * hit ops with insufficient alignment. Solved by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * aligning the address to cache line size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) blast_inv_scache_range(addr, addr + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) __sync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) r4k_blast_dcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) R4600_HIT_CACHEOP_WAR_IMPL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) blast_inv_dcache_range(addr, addr + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) bc_inv(addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) __sync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) #endif /* CONFIG_DMA_NONCOHERENT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) static void r4k_flush_icache_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (cpu_has_vtag_icache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) r4k_blast_icache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) struct flush_kernel_vmap_range_args {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) unsigned long vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) static inline void local_r4k_flush_kernel_vmap_range_index(void *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * Aliases only affect the primary caches so don't bother with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * S-caches or T-caches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) r4k_blast_dcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) static inline void local_r4k_flush_kernel_vmap_range(void *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) struct flush_kernel_vmap_range_args *vmra = args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) unsigned long vaddr = vmra->vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) int size = vmra->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * Aliases only affect the primary caches so don't bother with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * S-caches or T-caches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) R4600_HIT_CACHEOP_WAR_IMPL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) blast_dcache_range(vaddr, vaddr + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) struct flush_kernel_vmap_range_args args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) args.vaddr = (unsigned long) vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) args.size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (size >= dcache_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) r4k_on_each_cpu(R4K_INDEX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) local_r4k_flush_kernel_vmap_range_index, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) r4k_on_each_cpu(R4K_HIT, local_r4k_flush_kernel_vmap_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static inline void rm7k_erratum31(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) const unsigned long ic_lsize = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /* RM7000 erratum #31. The icache is screwed at startup. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) write_c0_taglo(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) write_c0_taghi(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) __asm__ __volatile__ (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) ".set push\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) ".set noreorder\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) ".set mips3\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) "cache\t%1, 0(%0)\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) "cache\t%1, 0x1000(%0)\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) "cache\t%1, 0x2000(%0)\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) "cache\t%1, 0x3000(%0)\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) "cache\t%2, 0(%0)\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) "cache\t%2, 0x1000(%0)\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) "cache\t%2, 0x2000(%0)\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) "cache\t%2, 0x3000(%0)\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) "cache\t%1, 0(%0)\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) "cache\t%1, 0x1000(%0)\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) "cache\t%1, 0x2000(%0)\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) "cache\t%1, 0x3000(%0)\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) ".set pop\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill_I));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) static inline int alias_74k_erratum(struct cpuinfo_mips *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) unsigned int imp = c->processor_id & PRID_IMP_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) unsigned int rev = c->processor_id & PRID_REV_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) int present = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * Early versions of the 74K do not update the cache tags on a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * aliases. In this case it is better to treat the cache as always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * having aliases. Also disable the synonym tag update feature
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * where available. In this case no opportunistic tag update will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * happen where a load causes a virtual address miss but a physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * address hit during a D-cache look-up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) switch (imp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) case PRID_IMP_74K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) present = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (rev == PRID_REV_ENCODE_332(2, 4, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) write_c0_config6(read_c0_config6() | MTI_CONF6_SYND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) case PRID_IMP_1074K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) present = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) write_c0_config6(read_c0_config6() | MTI_CONF6_SYND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) return present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static void b5k_instruction_hazard(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) __sync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) __sync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) __asm__ __volatile__(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) " nop; nop; nop; nop; nop; nop; nop; nop\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) " nop; nop; nop; nop; nop; nop; nop; nop\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) " nop; nop; nop; nop; nop; nop; nop; nop\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) " nop; nop; nop; nop; nop; nop; nop; nop\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) : : : "memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) static char *way_string[] = { NULL, "direct mapped", "2-way",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) "3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) "9-way", "10-way", "11-way", "12-way",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) "13-way", "14-way", "15-way", "16-way",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static void probe_pcache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) struct cpuinfo_mips *c = ¤t_cpu_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) unsigned int config = read_c0_config();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) unsigned int prid = read_c0_prid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) int has_74k_erratum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) unsigned long config1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) unsigned int lsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) switch (current_cpu_type()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) case CPU_R4600: /* QED style two way caches? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) case CPU_R4700:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) case CPU_R5000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) case CPU_NEVADA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) c->icache.ways = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) c->icache.waybit = __ffs(icache_size/2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) c->dcache.ways = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) c->dcache.waybit= __ffs(dcache_size/2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) c->options |= MIPS_CPU_CACHE_CDEX_P;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) case CPU_R5500:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) c->icache.ways = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) c->icache.waybit= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) c->dcache.ways = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) c->dcache.waybit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) case CPU_TX49XX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) c->icache.ways = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) c->icache.waybit= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) c->dcache.ways = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) c->dcache.waybit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) c->options |= MIPS_CPU_CACHE_CDEX_P;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) c->options |= MIPS_CPU_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) case CPU_R4000PC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) case CPU_R4000SC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) case CPU_R4000MC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) case CPU_R4400PC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) case CPU_R4400SC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) case CPU_R4400MC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) c->icache.ways = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) c->icache.waybit = 0; /* doesn't matter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) c->dcache.ways = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) c->dcache.waybit = 0; /* does not matter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) c->options |= MIPS_CPU_CACHE_CDEX_P;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) case CPU_R10000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) case CPU_R12000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) case CPU_R14000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) case CPU_R16000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) c->icache.linesz = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) c->icache.ways = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) c->icache.waybit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) c->dcache.linesz = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) c->dcache.ways = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) c->dcache.waybit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) c->options |= MIPS_CPU_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) case CPU_VR4133:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) write_c0_config(config & ~VR41_CONF_P4K);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) case CPU_VR4131:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /* Workaround for cache instruction bug of VR4131 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) c->processor_id == 0x0c82U) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) config |= 0x00400000U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (c->processor_id == 0x0c80U)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) config |= VR41_CONF_BP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) write_c0_config(config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) c->options |= MIPS_CPU_CACHE_CDEX_P;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) c->icache.ways = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) c->icache.waybit = __ffs(icache_size/2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) c->dcache.ways = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) c->dcache.waybit = __ffs(dcache_size/2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) case CPU_VR41XX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) case CPU_VR4111:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) case CPU_VR4121:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) case CPU_VR4122:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) case CPU_VR4181:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) case CPU_VR4181A:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) c->icache.ways = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) c->icache.waybit = 0; /* doesn't matter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) c->dcache.ways = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) c->dcache.waybit = 0; /* does not matter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) c->options |= MIPS_CPU_CACHE_CDEX_P;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) case CPU_RM7000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) rm7k_erratum31();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) c->icache.ways = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) c->icache.waybit = __ffs(icache_size / c->icache.ways);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) c->dcache.ways = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) c->options |= MIPS_CPU_CACHE_CDEX_P;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) c->options |= MIPS_CPU_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) case CPU_LOONGSON2EF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (prid & 0x3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) c->icache.ways = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) c->icache.ways = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) c->icache.waybit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) if (prid & 0x3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) c->dcache.ways = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) c->dcache.ways = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) c->dcache.waybit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) case CPU_LOONGSON64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) config1 = read_c0_config1();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) lsize = (config1 >> 19) & 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (lsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) c->icache.linesz = 2 << lsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) c->icache.linesz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) c->icache.sets = 64 << ((config1 >> 22) & 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) c->icache.ways = 1 + ((config1 >> 16) & 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) icache_size = c->icache.sets *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) c->icache.ways *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) c->icache.linesz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) c->icache.waybit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) lsize = (config1 >> 10) & 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (lsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) c->dcache.linesz = 2 << lsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) c->dcache.linesz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) c->dcache.sets = 64 << ((config1 >> 13) & 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) c->dcache.ways = 1 + ((config1 >> 7) & 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) dcache_size = c->dcache.sets *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) c->dcache.ways *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) c->dcache.linesz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) c->dcache.waybit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) (c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) c->options |= MIPS_CPU_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) case CPU_CAVIUM_OCTEON3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) /* For now lie about the number of ways. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) c->icache.linesz = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) c->icache.sets = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) c->icache.ways = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) c->icache.flags |= MIPS_CACHE_VTAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) c->dcache.linesz = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) c->dcache.ways = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) c->dcache.sets = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) c->options |= MIPS_CPU_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (!(config & MIPS_CONF_M))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) panic("Don't know how to probe P-caches on this cpu.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * So we seem to be a MIPS32 or MIPS64 CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) * So let's probe the I-cache ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) config1 = read_c0_config1();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) lsize = (config1 >> 19) & 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /* IL == 7 is reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (lsize == 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) panic("Invalid icache line size");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) c->icache.linesz = lsize ? 2 << lsize : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) c->icache.ways = 1 + ((config1 >> 16) & 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) icache_size = c->icache.sets *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) c->icache.ways *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) c->icache.linesz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) c->icache.waybit = __ffs(icache_size/c->icache.ways);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) if (config & MIPS_CONF_VI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) c->icache.flags |= MIPS_CACHE_VTAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * Now probe the MIPS32 / MIPS64 data cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) c->dcache.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) lsize = (config1 >> 10) & 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) /* DL == 7 is reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (lsize == 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) panic("Invalid dcache line size");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) c->dcache.linesz = lsize ? 2 << lsize : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) c->dcache.ways = 1 + ((config1 >> 7) & 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) dcache_size = c->dcache.sets *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) c->dcache.ways *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) c->dcache.linesz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) c->options |= MIPS_CPU_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * Processor configuration sanity check for the R4000SC erratum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * #5. With page sizes larger than 32kB there is no possibility
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * to get a VCE exception anymore so we don't care about this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * misconfiguration. The case is rather theoretical anyway;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) * presumably no vendor is shipping his hardware in the "bad"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * configuration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) !(config & CONF_SC) && c->icache.linesz != 16 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) PAGE_SIZE <= 0x8000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) panic("Improper R4000SC processor configuration detected");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) /* compute a couple of other cache variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) c->icache.waysize = icache_size / c->icache.ways;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) c->dcache.waysize = dcache_size / c->dcache.ways;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) c->icache.sets = c->icache.linesz ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) icache_size / (c->icache.linesz * c->icache.ways) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) c->dcache.sets = c->dcache.linesz ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) * R1x000 P-caches are odd in a positive way. They're 32kB 2-way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) * virtually indexed so normally would suffer from aliases. So
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) * normally they'd suffer from aliases but magic in the hardware deals
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) * with that for us so we don't need to take care ourselves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) switch (current_cpu_type()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) case CPU_20KC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) case CPU_25KF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) case CPU_I6400:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) case CPU_I6500:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) case CPU_SB1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) case CPU_SB1A:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) case CPU_XLR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) c->dcache.flags |= MIPS_CACHE_PINDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) case CPU_R10000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) case CPU_R12000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) case CPU_R14000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) case CPU_R16000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) case CPU_74K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) case CPU_1074K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) has_74k_erratum = alias_74k_erratum(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) case CPU_M14KC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) case CPU_M14KEC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) case CPU_24K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) case CPU_34K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) case CPU_1004K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) case CPU_INTERAPTIV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) case CPU_P5600:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) case CPU_PROAPTIV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) case CPU_M5150:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) case CPU_QEMU_GENERIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) case CPU_P6600:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) case CPU_M6250:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) (c->icache.waysize > PAGE_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) c->icache.flags |= MIPS_CACHE_ALIASES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (!has_74k_erratum && (read_c0_config7() & MIPS_CONF7_AR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) * Effectively physically indexed dcache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) * thus no virtual aliases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) c->dcache.flags |= MIPS_CACHE_PINDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) c->dcache.flags |= MIPS_CACHE_ALIASES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) /* Physically indexed caches don't suffer from virtual aliasing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (c->dcache.flags & MIPS_CACHE_PINDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) c->dcache.flags &= ~MIPS_CACHE_ALIASES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * In systems with CM the icache fills from L2 or closer caches, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) * thus sees remote stores without needing to write them back any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * further than that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) if (mips_cm_present())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) c->icache.flags |= MIPS_IC_SNOOPS_REMOTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) switch (current_cpu_type()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) case CPU_20KC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) * Some older 20Kc chips doesn't have the 'VI' bit in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) * the config register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) c->icache.flags |= MIPS_CACHE_VTAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) case CPU_ALCHEMY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) case CPU_I6400:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) case CPU_I6500:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) c->icache.flags |= MIPS_CACHE_IC_F_DC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) case CPU_BMIPS5000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) c->icache.flags |= MIPS_CACHE_IC_F_DC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) /* Cache aliases are handled in hardware; allow HIGHMEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) c->dcache.flags &= ~MIPS_CACHE_ALIASES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) case CPU_LOONGSON2EF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) * LOONGSON2 has 4 way icache, but when using indexed cache op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) * one op will act on all 4 ways
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) c->icache.ways = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) icache_size >> 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) way_string[c->icache.ways], c->icache.linesz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) dcache_size >> 10, way_string[c->dcache.ways],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) (c->dcache.flags & MIPS_CACHE_ALIASES) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) "cache aliases" : "no aliases",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) c->dcache.linesz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) static void probe_vcache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) struct cpuinfo_mips *c = ¤t_cpu_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) unsigned int config2, lsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) if (current_cpu_type() != CPU_LOONGSON64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) config2 = read_c0_config2();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if ((lsize = ((config2 >> 20) & 15)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) c->vcache.linesz = 2 << lsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) c->vcache.linesz = lsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) c->vcache.sets = 64 << ((config2 >> 24) & 15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) c->vcache.ways = 1 + ((config2 >> 16) & 15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) c->vcache.waybit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) c->vcache.waysize = vcache_size / c->vcache.ways;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) * If you even _breathe_ on this function, look at the gcc output and make sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) * it does not pop things on and off the stack for the cache sizing loop that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) * executes in KSEG1 space or else you will crash and burn badly. You have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) * been warned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) static int probe_scache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) unsigned long flags, addr, begin, end, pow2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) unsigned int config = read_c0_config();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) struct cpuinfo_mips *c = ¤t_cpu_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) if (config & CONF_SC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) begin = (unsigned long) &_stext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) begin &= ~((4 * 1024 * 1024) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) end = begin + (4 * 1024 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) * This is such a bitch, you'd think they would make it easy to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) * this. Away you daemons of stupidity!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) /* Fill each size-multiple cache line with a valid tag. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) pow2 = (64 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) for (addr = begin; addr < end; addr = (begin + pow2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) unsigned long *p = (unsigned long *) addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) pow2 <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) /* Load first line with zero (therefore invalid) tag. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) write_c0_taglo(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) write_c0_taghi(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) cache_op(Index_Store_Tag_I, begin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) cache_op(Index_Store_Tag_D, begin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) cache_op(Index_Store_Tag_SD, begin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) /* Now search for the wrap around point. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) pow2 = (128 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) cache_op(Index_Load_Tag_SD, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) if (!read_c0_taglo())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) pow2 <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) addr -= begin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) scache_size = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) c->scache.ways = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) c->scache.waybit = 0; /* does not matter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) static void loongson2_sc_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) struct cpuinfo_mips *c = ¤t_cpu_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) scache_size = 512*1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) c->scache.linesz = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) c->scache.ways = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) c->scache.waybit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) c->scache.waysize = scache_size / (c->scache.ways);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) c->options |= MIPS_CPU_INCLUSIVE_CACHES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) static void loongson3_sc_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) struct cpuinfo_mips *c = ¤t_cpu_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) unsigned int config2, lsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) config2 = read_c0_config2();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) lsize = (config2 >> 4) & 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) if (lsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) c->scache.linesz = 2 << lsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) c->scache.linesz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) c->scache.sets = 64 << ((config2 >> 8) & 15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) c->scache.ways = 1 + (config2 & 15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) scache_size = c->scache.sets *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) c->scache.ways *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) c->scache.linesz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) /* Loongson-3 has 4-Scache banks, while Loongson-2K have only 2 banks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) scache_size *= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) scache_size *= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) c->scache.waybit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) c->scache.waysize = scache_size / c->scache.ways;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) if (scache_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) c->options |= MIPS_CPU_INCLUSIVE_CACHES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) extern int r5k_sc_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) extern int rm7k_sc_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) extern int mips_sc_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) static void setup_scache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) struct cpuinfo_mips *c = ¤t_cpu_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) unsigned int config = read_c0_config();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) int sc_present = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) * Do the probing thing on R4000SC and R4400SC processors. Other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) * processors don't have a S-cache that would be relevant to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) * Linux memory management.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) switch (current_cpu_type()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) case CPU_R4000SC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) case CPU_R4000MC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) case CPU_R4400SC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) case CPU_R4400MC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) sc_present = run_uncached(probe_scache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) if (sc_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) c->options |= MIPS_CPU_CACHE_CDEX_S;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) case CPU_R10000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) case CPU_R12000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) case CPU_R14000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) case CPU_R16000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) c->scache.linesz = 64 << ((config >> 13) & 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) c->scache.ways = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) c->scache.waybit= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) sc_present = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) case CPU_R5000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) case CPU_NEVADA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) #ifdef CONFIG_R5000_CPU_SCACHE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) r5k_sc_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) case CPU_RM7000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) #ifdef CONFIG_RM7000_CPU_SCACHE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) rm7k_sc_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) case CPU_LOONGSON2EF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) loongson2_sc_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) case CPU_LOONGSON64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) loongson3_sc_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) case CPU_CAVIUM_OCTEON3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) case CPU_XLP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) /* don't need to worry about L2, fully coherent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) #ifdef CONFIG_MIPS_CPU_SCACHE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) if (mips_sc_init ()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) scache_size >> 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) way_string[c->scache.ways], c->scache.linesz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (current_cpu_type() == CPU_BMIPS5000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) c->options |= MIPS_CPU_INCLUSIVE_CACHES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) sc_present = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) if (!sc_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) /* compute a couple of other cache variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) c->scache.waysize = scache_size / c->scache.ways;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) c->options |= MIPS_CPU_INCLUSIVE_CACHES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) void au1x00_fixup_config_od(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) * c0_config.od (bit 19) was write only (and read as 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) * on the early revisions of Alchemy SOCs. It disables the bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) * transaction overlapping and needs to be set to fix various errata.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) switch (read_c0_prid()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) case 0x00030100: /* Au1000 DA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) case 0x00030201: /* Au1000 HA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) case 0x00030202: /* Au1000 HB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) case 0x01030200: /* Au1500 AB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) * Au1100 errata actually keeps silence about this bit, so we set it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) * just in case for those revisions that require it to be set according
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) * to the (now gone) cpu table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) case 0x02030200: /* Au1100 AB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) case 0x02030201: /* Au1100 BA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) case 0x02030202: /* Au1100 BC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) set_c0_config(1 << 19);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) /* CP0 hazard avoidance. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) #define NXP_BARRIER() \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) __asm__ __volatile__( \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) ".set noreorder\n\t" \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) "nop; nop; nop; nop; nop; nop;\n\t" \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) ".set reorder\n\t")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) static void nxp_pr4450_fixup_config(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) unsigned long config0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) config0 = read_c0_config();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) /* clear all three cache coherency fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) config0 &= ~(0x7 | (7 << 25) | (7 << 28));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) config0 |= (((_page_cachable_default >> _CACHE_SHIFT) << 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) ((_page_cachable_default >> _CACHE_SHIFT) << 28));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) write_c0_config(config0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) NXP_BARRIER();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) static int cca = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) static int __init cca_setup(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) get_option(&str, &cca);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) early_param("cca", cca_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) static void coherency_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (cca < 0 || cca > 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) cca = read_c0_config() & CONF_CM_CMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) _page_cachable_default = cca << _CACHE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) pr_debug("Using cache attribute %d\n", cca);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) change_c0_config(CONF_CM_CMASK, cca);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) * c0_status.cu=0 specifies that updates by the sc instruction use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) * the coherency mode specified by the TLB; 1 means cachable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) * coherent update on write will be used. Not all processors have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) * this bit and; some wire it to zero, others like Toshiba had the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) * silly idea of putting something else there ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) switch (current_cpu_type()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) case CPU_R4000PC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) case CPU_R4000SC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) case CPU_R4000MC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) case CPU_R4400PC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) case CPU_R4400SC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) case CPU_R4400MC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) clear_c0_config(CONF_CU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) * We need to catch the early Alchemy SOCs with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) * the write-only co_config.od bit and set it back to one on:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) * Au1000 rev DA, HA, HB; Au1100 AB, BA, BC, Au1500 AB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) case CPU_ALCHEMY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) au1x00_fixup_config_od();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) case PRID_IMP_PR4450:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) nxp_pr4450_fixup_config();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) static void r4k_cache_error_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) extern char __weak except_vec2_generic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) extern char __weak except_vec2_sb1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) switch (current_cpu_type()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) case CPU_SB1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) case CPU_SB1A:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) set_uncached_handler(0x100, &except_vec2_generic, 0x80);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) void r4k_cache_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) extern void build_clear_page(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) extern void build_copy_page(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) struct cpuinfo_mips *c = ¤t_cpu_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) probe_pcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) probe_vcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) setup_scache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) r4k_blast_dcache_page_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) r4k_blast_dcache_page_indexed_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) r4k_blast_dcache_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) r4k_blast_icache_page_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) r4k_blast_icache_page_indexed_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) r4k_blast_icache_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) r4k_blast_scache_page_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) r4k_blast_scache_page_indexed_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) r4k_blast_scache_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) r4k_blast_scache_node_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) #ifdef CONFIG_EVA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) r4k_blast_dcache_user_page_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) r4k_blast_icache_user_page_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) * Some MIPS32 and MIPS64 processors have physically indexed caches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) * This code supports virtually indexed processors and will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) * unnecessarily inefficient on physically indexed processors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) if (c->dcache.linesz && cpu_has_dc_aliases)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) shm_align_mask = max_t( unsigned long,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) c->dcache.sets * c->dcache.linesz - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) shm_align_mask = PAGE_SIZE-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) __flush_cache_vmap = r4k__flush_cache_vmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) __flush_cache_vunmap = r4k__flush_cache_vunmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) flush_cache_all = cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) __flush_cache_all = r4k___flush_cache_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) flush_cache_mm = r4k_flush_cache_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) flush_cache_page = r4k_flush_cache_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) flush_cache_range = r4k_flush_cache_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) flush_icache_all = r4k_flush_icache_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) local_flush_data_cache_page = local_r4k_flush_data_cache_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) flush_data_cache_page = r4k_flush_data_cache_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) flush_icache_range = r4k_flush_icache_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) local_flush_icache_range = local_r4k_flush_icache_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) __flush_icache_user_range = r4k_flush_icache_user_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) __local_flush_icache_user_range = local_r4k_flush_icache_user_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) #ifdef CONFIG_DMA_NONCOHERENT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) #ifdef CONFIG_DMA_MAYBE_COHERENT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) if (coherentio == IO_COHERENCE_ENABLED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) _dma_cache_wback_inv = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) _dma_cache_wback = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) _dma_cache_inv = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) #endif /* CONFIG_DMA_MAYBE_COHERENT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) _dma_cache_wback_inv = r4k_dma_cache_wback_inv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) _dma_cache_wback = r4k_dma_cache_wback_inv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) _dma_cache_inv = r4k_dma_cache_inv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) #endif /* CONFIG_DMA_NONCOHERENT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) build_clear_page();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) build_copy_page();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) * We want to run CMP kernels on core with and without coherent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) * or not to flush caches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) local_r4k___flush_cache_all(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) coherency_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) board_cache_error_setup = r4k_cache_error_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) * Per-CPU overrides
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) switch (current_cpu_type()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) case CPU_BMIPS4350:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) case CPU_BMIPS4380:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) /* No IPI is needed because all CPUs share the same D$ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) flush_data_cache_page = r4k_blast_dcache_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) case CPU_BMIPS5000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) /* We lose our superpowers if L2 is disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) if (c->scache.flags & MIPS_CACHE_NOT_PRESENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) /* I$ fills from D$ just by emptying the write buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) flush_cache_page = (void *)b5k_instruction_hazard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) flush_cache_range = (void *)b5k_instruction_hazard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) local_flush_data_cache_page = (void *)b5k_instruction_hazard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) flush_data_cache_page = (void *)b5k_instruction_hazard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) flush_icache_range = (void *)b5k_instruction_hazard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) local_flush_icache_range = (void *)b5k_instruction_hazard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) /* Optimization: an L2 flush implicitly flushes the L1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) case CPU_LOONGSON64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) /* Loongson-3 maintains cache coherency by hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) __flush_cache_all = cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) __flush_cache_vmap = cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) __flush_cache_vunmap = cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) __flush_kernel_vmap_range = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) flush_cache_mm = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) flush_cache_page = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) flush_cache_range = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) flush_icache_all = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) flush_data_cache_page = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) local_flush_data_cache_page = (void *)cache_noop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) case CPU_PM_ENTER_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) case CPU_PM_EXIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) coherency_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) static struct notifier_block r4k_cache_pm_notifier_block = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) .notifier_call = r4k_cache_pm_notifier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) int __init r4k_cache_init_pm(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) arch_initcall(r4k_cache_init_pm);