// SPDX-License-Identifier: GPL-2.0
/*
 * srmmu.c:  SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io-unit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/bitext.h>
#include <asm/vaddrs.h>
#include <asm/cache.h>
#include <asm/traps.h>
#include <asm/oplib.h>
#include <asm/mbus.h>
#include <asm/page.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/io.h>

/* Now the cpu specific definitions. */
#include <asm/turbosparc.h>
#include <asm/tsunami.h>
#include <asm/viking.h>
#include <asm/swift.h>
#include <asm/leon.h>
#include <asm/mxcc.h>
#include <asm/ross.h>

#include "mm_32.h"

enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
EXPORT_SYMBOL(vac_cache_size);
int vac_line_size;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

static pgd_t *srmmu_swapper_pg_dir;

const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
EXPORT_SYMBOL(sparc32_cachetlb_ops);

#ifdef CONFIG_SMP
const struct sparc32_cachetlb_ops *local_ops;

#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif

int flush_page_for_dma_global = 1;

char *srmmu_name;

ctxd_t *srmmu_ctx_table_phys;
static ctxd_t *srmmu_context_table;

int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);

static int is_hypersparc;

static int srmmu_cache_pagetables;

/* these will be initialized in srmmu_nocache_calcsize() */
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)

/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)

void *srmmu_nocache_pool;
static struct bit_map srmmu_nocache_map;

static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

/* XXX should we hyper_flush_whole_icache here - Anton */
static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{
	pte_t pte;

	pte = __pte((SRMMU_ET_PTD | (__nocache_pa(pgdp) >> 4)));
	set_pte((pte_t *)ctxp, pte);
}

/*
 * Locations of MSI Registers.
 */
#define MSI_MBUS_ARBEN	0xe0001008	/* MBus Arbiter Enable register */

/*
 * Useful bits in the MSI Registers.
 */
#define MSI_ASYNC_MODE	0x80000000	/* Operate the MSI asynchronously */

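/*
 * Switch the MSI into synchronous mode: read the MBus arbiter enable
 * register through ASI_M_CTL, clear MSI_ASYNC_MODE and write it back.
 */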
static void msi_set_sync(void)
{
	__asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
			      "andn %%g3, %2, %%g3\n\t"
			      "sta %%g3, [%0] %1\n\t" : :
			      "r" (MSI_MBUS_ARBEN),
			      "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
}

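/*
 * Make a pmd point to a pte table.  As in srmmu_ctxd_set() above, a
 * table pointer is the table's physical address shifted right by 4
 * and tagged with the SRMMU_ET_PTD descriptor type.
 */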
void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptp = __nocache_pa(ptep) >> 4;
	set_pte((pte_t *)&pmd_val(*pmdp), __pte(SRMMU_ET_PTD | ptp));
}

/*
 * size: bytes to allocate in the nocache area.
 * align: bytes, number to align at.
 * Returns the virtual address of the allocated area.
 */
static void *__srmmu_get_nocache(int size, int align)
{
	int offset, minsz = 1 << SRMMU_NOCACHE_BITMAP_SHIFT;
	unsigned long addr;

	if (size < minsz) {
		printk(KERN_ERR "Size 0x%x too small for nocache request\n",
		       size);
		size = minsz;
	}
	if (size & (minsz - 1)) {
		printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",
		       size);
		size += minsz - 1;
	}
	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

	offset = bit_map_string_get(&srmmu_nocache_map,
				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
		       size, (int) srmmu_nocache_size,
		       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return NULL;
	}

	addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
	return (void *)addr;
}

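/* Zeroing wrapper around __srmmu_get_nocache(). */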
void *srmmu_get_nocache(int size, int align)
{
	void *tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset(tmp, 0, size);

	return tmp;
}

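/*
 * Return an allocation to the nocache bitmap.  The checks BUG() on a
 * bad free (out of range, non-power-of-two size, misaligned vaddr)
 * instead of silently corrupting the map.
 */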
void srmmu_free_nocache(void *addr, int size)
{
	unsigned long vaddr;
	int offset;

	vaddr = (unsigned long)addr;
	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		       vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr + size > srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		       vaddr, srmmu_nocache_end);
		BUG();
	}
	if (!is_power_of_2(size)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size - 1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}

static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
						 unsigned long end);

/* Return how much physical memory we have. */
static unsigned long __init probe_memory(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++)
		total += sp_banks[i].num_bytes;

	return total;
}

/*
 * Reserve nocache dynamically proportionally to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
static void __init srmmu_nocache_calcsize(void)
{
	unsigned long sysmemavail = probe_memory() / 1024;
	int srmmu_nocache_npages;

	srmmu_nocache_npages =
		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;

	/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
	if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;

	/* anything above 1280 blows up */
	if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;

	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}

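/*
 * Set up the nocache area: carve the pool and its allocation bitmap
 * out of memblock, allocate swapper_pg_dir from the pool, build page
 * tables covering the area and map it page by page at
 * SRMMU_NOCACHE_VADDR (uncached unless srmmu_cache_pagetables is set).
 */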
static void __init srmmu_nocache_init(void)
{
	void *srmmu_nocache_bitmap;
	unsigned int bitmap_bits;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long paddr, vaddr;
	unsigned long pteval;

	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	srmmu_nocache_pool = memblock_alloc(srmmu_nocache_size,
					    SRMMU_NOCACHE_ALIGN_MAX);
	if (!srmmu_nocache_pool)
		panic("%s: Failed to allocate %lu bytes align=0x%x\n",
		      __func__, srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	srmmu_nocache_bitmap =
		memblock_alloc(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
			       SMP_CACHE_BYTES);
	if (!srmmu_nocache_bitmap)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      BITS_TO_LONGS(bitmap_bits) * sizeof(long));
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
	init_mm.pgd = srmmu_swapper_pg_dir;

	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

	paddr = __pa((unsigned long)srmmu_nocache_pool);
	vaddr = SRMMU_NOCACHE_VADDR;

	while (vaddr < srmmu_nocache_end) {
		pgd = pgd_offset_k(vaddr);
		p4d = p4d_offset(pgd, vaddr);
		pud = pud_offset(p4d, vaddr);
		pmd = pmd_offset(__nocache_fix(pud), vaddr);
		pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);

		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

		if (srmmu_cache_pagetables)
			pteval |= SRMMU_CACHE;

		set_pte(__nocache_fix(pte), __pte(pteval));

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	flush_cache_all();
	flush_tlb_all();
}

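/*
 * Allocate a pgd from nocache memory, with user entries cleared and
 * kernel entries copied from the kernel's page directory.
 */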
pgd_t *get_pgd_fast(void)
{
	pgd_t *pgd = NULL;

	pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	if (pgd) {
		pgd_t *init = pgd_offset_k(0);
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return pgd;
}

/*
 * Hardware needs alignment to 256 only, but we align to whole page size
 * to reduce fragmentation problems due to the buddy principle.
 * XXX Provide actual fragmentation statistics in /proc.
 *
 * Alignments up to the page size are the same for physical and virtual
 * addresses of the nocache area.
 */
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	pte_t *ptep;
	struct page *page;

	if ((ptep = pte_alloc_one_kernel(mm)) == 0)
		return NULL;
	page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
	spin_lock(&mm->page_table_lock);
	if (page_ref_inc_return(page) == 2 && !pgtable_pte_page_ctor(page)) {
		page_ref_dec(page);
		ptep = NULL;
	}
	spin_unlock(&mm->page_table_lock);

	return ptep;
}

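/*
 * Drop a reference on the page backing this pte table and run the
 * page table destructor when the last table on that page goes away,
 * then give the chunk back to the nocache pool.
 */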
void pte_free(struct mm_struct *mm, pgtable_t ptep)
{
	struct page *page;

	page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
	spin_lock(&mm->page_table_lock);
	if (page_ref_dec_return(page) == 1)
		pgtable_pte_page_dtor(page);
	spin_unlock(&mm->page_table_lock);

	srmmu_free_nocache(ptep, SRMMU_PTE_TABLE_SIZE);
}

/* context handling - a dynamically sized pool is used */
#define NO_CONTEXT	-1

struct ctx_list {
	struct ctx_list *next;
	struct ctx_list *prev;
	unsigned int ctx_number;
	struct mm_struct *ctx_mm;
};

static struct ctx_list *ctx_list_pool;
static struct ctx_list ctx_free;
static struct ctx_list ctx_used;

/* At boot time we determine the number of contexts */
static int num_contexts;

static inline void remove_from_ctx_list(struct ctx_list *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
	entry->next = head;
	(entry->prev = head->prev)->next = entry;
	head->prev = entry;
}
#define add_to_free_ctxlist(entry)	add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry)	add_to_ctx_list(&ctx_used, entry)


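/*
 * Give mm a hardware context.  Take one from the free list if
 * possible; otherwise steal the context at the head of the used list
 * (skipping the one owned by old_mm), flushing its cache and TLB
 * state before reassigning it.
 */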
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if (ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if (ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if (ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}

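/* Allocate the context pool and put every context on the free list. */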
static void __init sparc_context_init(int numctx)
{
	int ctx;
	unsigned long size;

	size = numctx * sizeof(struct ctx_list);
	ctx_list_pool = memblock_alloc(size, SMP_CACHE_BYTES);
	if (!ctx_list_pool)
		panic("%s: Failed to allocate %lu bytes\n", __func__, size);

	for (ctx = 0; ctx < numctx; ctx++) {
		struct ctx_list *clist;

		clist = (ctx_list_pool + ctx);
		clist->ctx_number = ctx;
		clist->ctx_mm = NULL;
	}
	ctx_free.next = ctx_free.prev = &ctx_free;
	ctx_used.next = ctx_used.prev = &ctx_used;
	for (ctx = 0; ctx < numctx; ctx++)
		add_to_free_ctxlist(ctx_list_pool + ctx);
}

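/*
 * Activate mm on this CPU: allocate a context on first use and hook
 * mm's pgd into the context table, then load the new context number
 * into the MMU (with the whole-icache flush HyperSparc requires).
 */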
void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
	       struct task_struct *tsk)
{
	unsigned long flags;

	if (mm->context == NO_CONTEXT) {
		spin_lock_irqsave(&srmmu_context_spinlock, flags);
		alloc_context(old_mm, mm);
		spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (sparc_cpu_model == sparc_leon)
		leon_switch_mm();

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}

/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
				   unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	p4dp = p4d_offset(pgdp, virt_addr);
	pudp = pud_offset(p4dp, virt_addr);
	pmdp = pmd_offset(pudp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/* I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	set_pte(ptep, __pte(tmp));
}

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_mapioaddr(xpa, xva, bus);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
	flush_tlb_all();
}

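/* Undo a single page mapping set up by srmmu_mapioaddr(). */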
static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;


	pgdp = pgd_offset_k(virt_addr);
	p4dp = p4d_offset(pgdp, virt_addr);
	pudp = pud_offset(p4dp, virt_addr);
	pmdp = pmd_offset(pudp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	__pte_clear(ptep);
}

void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_unmapioaddr(virt_addr);
		virt_addr += PAGE_SIZE;
	}
	flush_tlb_all();
}

/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);

/* swift.S */
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

#if 0 /* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
		/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			/* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif

/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seems to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */

/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);

/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);

/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

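/*
 * Build pmd and pte tables covering [start, end) so that ptes can be
 * installed there later.  This early variant runs before the nocache
 * area is mapped, so every reference into it has to go through
 * __nocache_fix() to reach the backing pages.
 */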
static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		p4dp = p4d_offset(pgdp, start);
		pudp = pud_offset(p4dp, start);
		if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
			pmdp = __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pud_set(__nocache_fix(pudp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pudp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

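/*
 * Same as above, for use once the nocache area is mapped: pointers
 * into it can be dereferenced directly, no __nocache_fix() needed.
 */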
static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		p4dp = p4d_offset(pgdp, start);
		pudp = pud_offset(p4dp, start);
		if (pud_none(*pudp)) {
			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			pud_set((pud_t *)pgdp, pmdp);
		}
		pmdp = pmd_offset(pudp, start);
		if (srmmu_pmd_none(*pmdp)) {
			ptep = __srmmu_get_nocache(PTE_SIZE,
						   PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/* These flush types are not available on all chips... */
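/*
 * Ask the MMU what it maps at vaddr using the flush/probe ASI and
 * return the probed entry (0 if nothing is mapped).  LEON has no
 * hardware probe, so a software page table walk is done instead.
 */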
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) static inline unsigned long srmmu_probe(unsigned long vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) unsigned long retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (sparc_cpu_model != sparc_leon) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) vaddr &= PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) __asm__ __volatile__("lda [%1] %2, %0\n\t" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) "=r" (retval) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) retval = leon_swprobe(vaddr, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * This is much cleaner than poking around physical address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * looking at the prom's page table directly which is what most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * other OS's do. Yuck... this is much better.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) static void __init srmmu_inherit_prom_mappings(unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) unsigned long probed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) pgd_t *pgdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) p4d_t *p4dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) pud_t *pudp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) pmd_t *pmdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) pte_t *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) while (start <= end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (start == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) break; /* probably wrap around */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (start == 0xfef00000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) start = KADB_DEBUGGER_BEGVM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) probed = srmmu_probe(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (!probed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* continue probing until we find an entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) start += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
		/* We caught a live one ("a red snapper"); see what it really is. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) what = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) addr = start - PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
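		/*
		 * 'start' is PMD/PGD aligned when the checks below fire;
		 * if probing the last page of the region returns the same
		 * value as the probe at 'start', the PROM covered the whole
		 * region with a single large (pmd- or pgd-level) PTE.
		 */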
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (!(start & ~(PMD_MASK))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (srmmu_probe(addr + PMD_SIZE) == probed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) what = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (!(start & ~(PGDIR_MASK))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (srmmu_probe(addr + PGDIR_SIZE) == probed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) what = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) pgdp = pgd_offset_k(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) p4dp = p4d_offset(pgdp, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) pudp = pud_offset(p4dp, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (what == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) *(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) start += PGDIR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) SRMMU_PMD_TABLE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (pmdp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) early_pgtable_allocfail("pmd");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) pud_set(__nocache_fix(pudp), pmdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
		pmdp = pmd_offset(__nocache_fix(pudp), start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (what == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) *(pmd_t *)__nocache_fix(pmdp) = __pmd(probed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) start += PMD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (ptep == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) early_pgtable_allocfail("pte");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) memset(__nocache_fix(ptep), 0, PTE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) pmd_set(__nocache_fix(pmdp), ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) *(pte_t *)__nocache_fix(ptep) = __pte(probed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) start += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) #define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)
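/*
 * An SRMMU PTE holds the physical address shifted right by 4: bits
 * [31:8] carry the physical page number and the low byte carries the
 * access/cache control bits, hence the "phys_base >> 4" below.
 */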
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /* Create a third-level SRMMU 16MB page mapping. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) pgd_t *pgdp = pgd_offset_k(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) unsigned long big_pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) big_pte = KERNEL_PTE(phys_base >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) *(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) unsigned long pstart = (sp_banks[sp_entry].base_addr & PGDIR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) unsigned long vstart = (vbase & PGDIR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) unsigned long vend = PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /* Map "low" memory only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) const unsigned long min_vaddr = PAGE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (vstart < min_vaddr || vstart >= max_vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) return vstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (vend > max_vaddr || vend < min_vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) vend = max_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) while (vstart < vend) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) do_large_mapping(vstart, pstart);
		vstart += PGDIR_SIZE;
		pstart += PGDIR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return vstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) static void __init map_kernel(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (phys_base > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) do_large_mapping(PAGE_OFFSET, phys_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) for (i = 0; sp_banks[i].num_bytes != 0; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) void (*poke_srmmu)(void) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) void __init srmmu_paging_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) phandle cpunode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) char node_str[128];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) pgd_t *pgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) p4d_t *p4d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) pud_t *pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) pmd_t *pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) pte_t *pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) unsigned long pages_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) init_mm.context = (unsigned long) NO_CONTEXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (sparc_cpu_model == sun4d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) num_contexts = 65536; /* We know it is Viking */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /* Find the number of contexts on the srmmu. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) cpunode = prom_getchild(prom_root_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) num_contexts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) while (cpunode != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (!strcmp(node_str, "cpu")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) cpunode = prom_getsibling(cpunode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (!num_contexts) {
		prom_printf("Something is wrong: can't find the cpu node in paging_init.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) pages_avail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) last_valid_pfn = bootmem_init(&pages_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) srmmu_nocache_calcsize();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) srmmu_nocache_init();
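	/* Copy the PROM's own mappings into our page tables so we can
	 * still call into the PROM after the switch-over below. */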
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) map_kernel();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
	/* The hardware requires the context table to be physically aligned to its size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa(srmmu_context_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) for (i = 0; i < num_contexts; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) flush_cache_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) #ifdef CONFIG_SMP
	/* Flush only the local TLB; cross-calling the other cpus here
	 * would hang, since they are not running yet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) local_ops->tlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) flush_tlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) poke_srmmu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) srmmu_allocate_ptable_skeleton(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) __fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) pgd = pgd_offset_k(PKMAP_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) p4d = p4d_offset(pgd, PKMAP_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) pud = pud_offset(p4d, PKMAP_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) pmd = pmd_offset(pud, PKMAP_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) pte = pte_offset_kernel(pmd, PKMAP_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) pkmap_page_table = pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) flush_cache_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) flush_tlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) sparc_context_init(num_contexts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) kmap_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) max_zone_pfn[ZONE_DMA] = max_low_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) max_zone_pfn[ZONE_HIGHMEM] = highend_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) free_area_init(max_zone_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) void mmu_info(struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) seq_printf(m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) "MMU type\t: %s\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) "contexts\t: %d\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) "nocache total\t: %ld\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) "nocache used\t: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) srmmu_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) num_contexts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) srmmu_nocache_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
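/*
 * Illustrative /proc/cpuinfo fragment produced by the above (the
 * values are made up):
 *
 *	MMU type	: TI Viking/MXCC
 *	contexts	: 65536
 *	nocache total	: 1048576
 *	nocache used	: 98304
 */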
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
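/* A fresh mm gets no hardware context up front; one is allocated
 * lazily the first time the mm is switched in. */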
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) mm->context = NO_CONTEXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) void destroy_context(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (mm->context != NO_CONTEXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) flush_cache_mm(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) flush_tlb_mm(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) spin_lock_irqsave(&srmmu_context_spinlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) free_context(mm->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) mm->context = NO_CONTEXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) /* Init various srmmu chip types. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) static void __init srmmu_is_bad(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) prom_printf("Could not determine SRMMU chip type.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) static void __init init_vac_layout(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) phandle nd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) int cache_lines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) char node_str[128];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) int cpu = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) unsigned long max_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) unsigned long min_line_size = 0x10000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) #endif
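	/*
	 * Walk the PROM device tree and read the virtually-addressed
	 * cache (VAC) geometry from each cpu node.  On SMP we keep the
	 * largest cache size and the smallest line size seen, the
	 * conservative combination for flushing.
	 */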
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) nd = prom_getchild(prom_root_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) while ((nd = prom_getsibling(nd)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) prom_getstring(nd, "device_type", node_str, sizeof(node_str));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (!strcmp(node_str, "cpu")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) vac_line_size = prom_getint(nd, "cache-line-size");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (vac_line_size == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) prom_printf("can't determine cache-line-size, halting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) cache_lines = prom_getint(nd, "cache-nlines");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (cache_lines == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) prom_printf("can't determine cache-nlines, halting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) vac_cache_size = cache_lines * vac_line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (vac_cache_size > max_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) max_size = vac_cache_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (vac_line_size < min_line_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) min_line_size = vac_line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) //FIXME: cpus not contiguous!!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) cpu++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (cpu >= nr_cpu_ids || !cpu_online(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (nd == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) prom_printf("No CPU nodes found, halting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) vac_cache_size = max_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) vac_line_size = min_line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) (int)vac_cache_size, (int)vac_line_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) static void poke_hypersparc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) volatile unsigned long clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) unsigned long mreg = srmmu_get_mmureg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) hyper_flush_unconditional_combined();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) mreg &= ~(HYPERSPARC_CWENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) mreg |= (HYPERSPARC_CMODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) srmmu_set_mmureg(mreg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) #if 0 /* XXX I think this is bad news... -DaveM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) hyper_clear_all_tags();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) hyper_flush_whole_icache();
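	/* Reading the fault address/status registers clears any fault
	 * state latched during the flushes above. */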
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) clear = srmmu_get_faddr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) clear = srmmu_get_fstatus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) static const struct sparc32_cachetlb_ops hypersparc_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) .cache_all = hypersparc_flush_cache_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) .cache_mm = hypersparc_flush_cache_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) .cache_page = hypersparc_flush_cache_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) .cache_range = hypersparc_flush_cache_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) .tlb_all = hypersparc_flush_tlb_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) .tlb_mm = hypersparc_flush_tlb_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) .tlb_page = hypersparc_flush_tlb_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) .tlb_range = hypersparc_flush_tlb_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) .page_to_ram = hypersparc_flush_page_to_ram,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) .sig_insns = hypersparc_flush_sig_insns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) .page_for_dma = hypersparc_flush_page_for_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) static void __init init_hypersparc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) srmmu_name = "ROSS HyperSparc";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) srmmu_modtype = HyperSparc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) init_vac_layout();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) is_hypersparc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) sparc32_cachetlb_ops = &hypersparc_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) poke_srmmu = poke_hypersparc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) hypersparc_setup_blockops();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) static void poke_swift(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) unsigned long mreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) /* Clear any crap from the cache or else... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) swift_flush_cache_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) /* Enable I & D caches */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) mreg = srmmu_get_mmureg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken.  At
	 * trap time, if things are just right, it can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code).  So you see things like crashme completely
	 * hosing your machine, which is completely unacceptable.  Turn
	 * this shit off... nice job Fujitsu.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) mreg &= ~(SWIFT_BF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) srmmu_set_mmureg(mreg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) static const struct sparc32_cachetlb_ops swift_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) .cache_all = swift_flush_cache_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) .cache_mm = swift_flush_cache_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) .cache_page = swift_flush_cache_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) .cache_range = swift_flush_cache_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) .tlb_all = swift_flush_tlb_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) .tlb_mm = swift_flush_tlb_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) .tlb_page = swift_flush_tlb_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) .tlb_range = swift_flush_tlb_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) .page_to_ram = swift_flush_page_to_ram,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) .sig_insns = swift_flush_sig_insns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) .page_for_dma = swift_flush_page_for_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) #define SWIFT_MASKID_ADDR 0x10003018
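/* Read the Swift mask ID through MMU bypass (ASI_M_BYPASS); the
 * revision sits in bits [31:24], hence the shift by 0x18. */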
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) static void __init init_swift(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) unsigned long swift_rev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) __asm__ __volatile__("lda [%1] %2, %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) "srl %0, 0x18, %0\n\t" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) "=r" (swift_rev) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) srmmu_name = "Fujitsu Swift";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) switch (swift_rev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) case 0x11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) case 0x20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) case 0x23:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) case 0x30:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) srmmu_modtype = Swift_lots_o_bugs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamaged stuff going
		 * on here.  However I think we can find a way to avoid
		 * all of the workaround overhead under Linux.  Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes).  Aha, sounds pretty
		 * horrible eh?  But wait, after extensive testing it
		 * appears that if you use pgd-level large kernel PTEs
		 * (like the 4MB pages on the Pentium) the bug does not
		 * get tripped at all.  This avoids almost all of the
		 * major overhead.  Welcome to a world where your vendor
		 * tells you to "apply this kernel patch" instead of
		 * "sorry for the broken hardware, send it back and
		 * we'll give you properly functioning parts".
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) case 0x25:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) case 0x31:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) srmmu_modtype = Swift_bad_c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit it directly; they'll say things like
		 * "the Swift chip cache problems" or similar.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) srmmu_modtype = Swift_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) sparc32_cachetlb_ops = &swift_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) flush_page_for_dma_global = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * Are you now convinced that the Swift is one of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * biggest VLSI abortions of all time? Bravo Fujitsu!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * Fujitsu, the !#?!%$'d up processor people. I bet if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * you examined the microcode of the Swift you'd find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) * XXX's all over the place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) poke_srmmu = poke_swift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
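/*
 * TurboSparc has no fine-grained flush primitives, so the mm/range/
 * page variants below all fall back to flushing the whole I/D cache
 * or the whole TLB.  The FLUSH_BEGIN/FLUSH_END guards skip mms that
 * never received a hardware context.
 */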
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) static void turbosparc_flush_cache_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) flush_user_windows();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) turbosparc_idflash_clear();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) static void turbosparc_flush_cache_mm(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) FLUSH_BEGIN(mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) flush_user_windows();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) turbosparc_idflash_clear();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) FLUSH_END
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) FLUSH_BEGIN(vma->vm_mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) flush_user_windows();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) turbosparc_idflash_clear();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) FLUSH_END
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) FLUSH_BEGIN(vma->vm_mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) flush_user_windows();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (vma->vm_flags & VM_EXEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) turbosparc_flush_icache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) turbosparc_flush_dcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) FLUSH_END
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
/* The TurboSparc D-cache can run copy-back if we turn that mode on,
 * but copy-back does not work reliably, so this is a no-op unless
 * TURBOSPARC_WRITEBACK is defined. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) static void turbosparc_flush_page_to_ram(unsigned long page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) #ifdef TURBOSPARC_WRITEBACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) volatile unsigned long clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (srmmu_probe(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) turbosparc_flush_page_cache(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) clear = srmmu_get_fstatus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) {
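	/* No-op: I-cache snooping (TURBOSPARC_ICSNOOP, see
	 * poke_turbosparc()) keeps instructions coherent. */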
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) static void turbosparc_flush_page_for_dma(unsigned long page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) turbosparc_flush_dcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) static void turbosparc_flush_tlb_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) srmmu_flush_whole_tlb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) FLUSH_BEGIN(mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) srmmu_flush_whole_tlb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) FLUSH_END
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) FLUSH_BEGIN(vma->vm_mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) srmmu_flush_whole_tlb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) FLUSH_END
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) FLUSH_BEGIN(vma->vm_mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) srmmu_flush_whole_tlb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) FLUSH_END
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) static void poke_turbosparc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) unsigned long mreg = srmmu_get_mmureg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) unsigned long ccreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) /* Clear any crap from the cache or else... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) turbosparc_flush_cache_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /* Temporarily disable I & D caches */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) mreg &= ~(TURBOSPARC_PCENABLE); /* Don't check parity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) srmmu_set_mmureg(mreg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) ccreg = turbosparc_get_ccreg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) #ifdef TURBOSPARC_WRITEBACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) ccreg |= (TURBOSPARC_SNENABLE); /* Do DVMA snooping in Dcache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) /* Write-back D-cache, emulate VLSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) * abortion number three, not number one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) /* For now let's play safe, optimize later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) /* Do DVMA snooping in Dcache, Write-thru D-cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) ccreg &= ~(TURBOSPARC_uS2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) /* Emulate VLSI abortion number three, not number one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
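	/* The low three bits of the cache-control register describe the
	 * external (SE) cache; enable it unless it is absent or the
	 * chip is in test mode. */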
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) switch (ccreg & 7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) case 0: /* No SE cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) case 7: /* Test mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) ccreg |= (TURBOSPARC_SCENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) turbosparc_set_ccreg(ccreg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) mreg |= (TURBOSPARC_ICSNOOP); /* Icache snooping on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) srmmu_set_mmureg(mreg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) static const struct sparc32_cachetlb_ops turbosparc_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) .cache_all = turbosparc_flush_cache_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) .cache_mm = turbosparc_flush_cache_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) .cache_page = turbosparc_flush_cache_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) .cache_range = turbosparc_flush_cache_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) .tlb_all = turbosparc_flush_tlb_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) .tlb_mm = turbosparc_flush_tlb_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) .tlb_page = turbosparc_flush_tlb_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) .tlb_range = turbosparc_flush_tlb_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) .page_to_ram = turbosparc_flush_page_to_ram,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) .sig_insns = turbosparc_flush_sig_insns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) .page_for_dma = turbosparc_flush_page_for_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) static void __init init_turbosparc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) srmmu_name = "Fujitsu TurboSparc";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) srmmu_modtype = TurboSparc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) sparc32_cachetlb_ops = &turbosparc_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) poke_srmmu = poke_turbosparc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) static void poke_tsunami(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) unsigned long mreg = srmmu_get_mmureg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) tsunami_flush_icache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) tsunami_flush_dcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) mreg &= ~TSUNAMI_ITD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) srmmu_set_mmureg(mreg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) static const struct sparc32_cachetlb_ops tsunami_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) .cache_all = tsunami_flush_cache_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) .cache_mm = tsunami_flush_cache_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) .cache_page = tsunami_flush_cache_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) .cache_range = tsunami_flush_cache_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) .tlb_all = tsunami_flush_tlb_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) .tlb_mm = tsunami_flush_tlb_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) .tlb_page = tsunami_flush_tlb_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) .tlb_range = tsunami_flush_tlb_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) .page_to_ram = tsunami_flush_page_to_ram,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) .sig_insns = tsunami_flush_sig_insns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) .page_for_dma = tsunami_flush_page_for_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) static void __init init_tsunami(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) * Tsunami's pretty sane, Sun and TI actually got it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) * somewhat right this time. Fujitsu should have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) * taken some lessons from them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) srmmu_name = "TI Tsunami";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) srmmu_modtype = Tsunami;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) sparc32_cachetlb_ops = &tsunami_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) poke_srmmu = poke_tsunami;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) tsunami_setup_blockops();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) static void poke_viking(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) unsigned long mreg = srmmu_get_mmureg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) static int smp_catch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (viking_mxcc_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) unsigned long mxcc_control = mxcc_get_creg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) mxcc_control &= ~(MXCC_CTL_RRC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) mxcc_set_creg(mxcc_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) * We don't need memory parity checks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) * XXX This is a mess, have to dig out later. ecd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) /* We do cache ptables on MXCC. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) mreg |= VIKING_TCENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) unsigned long bpreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) mreg &= ~(VIKING_TCENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (smp_catch++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) /* Must disable mixed-cmd mode here for other cpu's. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) bpreg = viking_get_bpreg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) bpreg &= ~(VIKING_ACTION_MIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) viking_set_bpreg(bpreg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) /* Just in case PROM does something funny. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) msi_set_sync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) mreg |= VIKING_SPENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) mreg |= VIKING_SBENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) mreg &= ~(VIKING_ACENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) srmmu_set_mmureg(mreg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) static struct sparc32_cachetlb_ops viking_ops __ro_after_init = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) .cache_all = viking_flush_cache_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) .cache_mm = viking_flush_cache_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) .cache_page = viking_flush_cache_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) .cache_range = viking_flush_cache_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) .tlb_all = viking_flush_tlb_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) .tlb_mm = viking_flush_tlb_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) .tlb_page = viking_flush_tlb_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) .tlb_range = viking_flush_tlb_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) .page_to_ram = viking_flush_page_to_ram,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) .sig_insns = viking_flush_sig_insns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) .page_for_dma = viking_flush_page_for_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) /* On sun4d the cpu broadcasts local TLB flushes, so we can just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) * perform the local TLB flush and all the other cpus will see it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) * But, unfortunately, there is a bug in the sun4d XBUS backplane
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) * that requires that we add some synchronization to these flushes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) * The bug is that the fifo which keeps track of all the pending TLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) * broadcasts in the system is an entry or two too small, so if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) * have too many going at once we'll overflow that fifo and lose a TLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) * flush resulting in corruption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) *
 * Our workaround is to take a global spinlock around the TLB flushes,
 * which guarantees we won't ever have too many pending.  It's a big
 * hammer, but a semaphore-like system to make sure we only have N TLB
 * flushes going at once would require SMP locking anyway, so there's
 * no real value in trying any harder than this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) */
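#if 0
/*
 * A minimal sketch of the spinlock workaround described above,
 * assuming a dedicated lock; the lock name and the _sketch function
 * are illustrative only, and the real sun4dsmp_* helpers may differ
 * in detail.
 */
static DEFINE_SPINLOCK(sun4d_flush_lock);

static void sun4dsmp_flush_tlb_all_sketch(void)
{
	unsigned long flags;

	spin_lock_irqsave(&sun4d_flush_lock, flags);	/* bound the pending broadcasts */
	local_ops->tlb_all();				/* hardware broadcasts this flush */
	spin_unlock_irqrestore(&sun4d_flush_lock, flags);
}
#endif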
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) static struct sparc32_cachetlb_ops viking_sun4d_smp_ops __ro_after_init = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) .cache_all = viking_flush_cache_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) .cache_mm = viking_flush_cache_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) .cache_page = viking_flush_cache_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) .cache_range = viking_flush_cache_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) .tlb_all = sun4dsmp_flush_tlb_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) .tlb_mm = sun4dsmp_flush_tlb_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) .tlb_page = sun4dsmp_flush_tlb_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) .tlb_range = sun4dsmp_flush_tlb_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) .page_to_ram = viking_flush_page_to_ram,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) .sig_insns = viking_flush_sig_insns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) .page_for_dma = viking_flush_page_for_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) static void __init init_viking(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) unsigned long mreg = srmmu_get_mmureg();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) /* Ahhh, the viking. SRMMU VLSI abortion number two... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (mreg & VIKING_MMODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) srmmu_name = "TI Viking";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) viking_mxcc_present = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) msi_set_sync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
		/*
		 * We need this to make sure the old Viking takes no hits
		 * on its cache for DMA snoops, to work around the
		 * "load from non-cacheable memory" interrupt bug.
		 * This is only necessary because of the new way in
		 * which we use the IOMMU.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) viking_ops.page_for_dma = viking_flush_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) flush_page_for_dma_global = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) srmmu_name = "TI Viking/MXCC";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) viking_mxcc_present = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) srmmu_cache_pagetables = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) &viking_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (sparc_cpu_model == sun4d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) &viking_sun4d_smp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) poke_srmmu = poke_viking;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) /* Probe for the srmmu chip version. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) static void __init get_srmmu_type(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) unsigned long mreg, psr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) srmmu_modtype = SRMMU_INVAL_MOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) hwbug_bitmask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
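	/* Both the MMU control register and the PSR carry implementation
	 * and version fields in their top byte; together these identify
	 * the exact SRMMU chip. */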
	mreg = srmmu_get_mmureg();
	psr = get_psr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) mod_typ = (mreg & 0xf0000000) >> 28;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) mod_rev = (mreg & 0x0f000000) >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) psr_typ = (psr >> 28) & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) psr_vers = (psr >> 24) & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) /* First, check for sparc-leon. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (sparc_cpu_model == sparc_leon) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) init_leon();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) /* Second, check for HyperSparc or Cypress. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (mod_typ == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) switch (mod_rev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) /* UP or MP Hypersparc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) init_hypersparc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) break;
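/* Every other revision with module type 1 is a Cypress part. */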
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) case 10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) case 11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) case 12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) case 13:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) case 14:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) case 15:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) default:
prom_printf("Sparc-Linux Cypress support no longer exists.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
/* Now for the Fujitsu TurboSparc. It might be running in Swift
 * emulation mode, so the Swift probe below re-checks for it...
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (psr_typ == 0 && psr_vers == 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) init_turbosparc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) /* Next check for Fujitsu Swift. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (psr_typ == 0 && psr_vers == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) phandle cpunode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) char node_str[128];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
/* Check whether this is really a TurboSparc emulating Swift... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) cpunode = prom_getchild(prom_root_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) while ((cpunode = prom_getsibling(cpunode)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) if (!strcmp(node_str, "cpu")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) prom_getintdefault(cpunode, "psr-version", 1) == 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) init_turbosparc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) init_swift();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) /* Now the Viking family of srmmu. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (psr_typ == 4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) ((psr_vers == 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) init_viking();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
/* Finally the Tsunami, which reports the same psr_typ/psr_vers as
 * a Viking but always with a non-zero module type or revision.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) init_tsunami();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) /* Oh well */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) srmmu_is_bad();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) #ifdef CONFIG_SMP
/* SMP cross-call wrappers: each broadcasts the chip-local operation
 * to the other CPUs via xcN (N is the argument count), then runs it
 * on the local CPU as well.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) static void smp_flush_page_for_dma(unsigned long page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) xc1((smpfunc_t) local_ops->page_for_dma, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) local_ops->page_for_dma(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) static void smp_flush_cache_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) xc0((smpfunc_t) local_ops->cache_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) local_ops->cache_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) static void smp_flush_tlb_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) xc0((smpfunc_t) local_ops->tlb_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) local_ops->tlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
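/*
 * The mm-scoped flushes below share one pattern: copy the mm's CPU
 * mask, drop the local CPU from it, cross-call any remote CPUs that
 * remain, then perform the flush locally.
 */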
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) static void smp_flush_cache_mm(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (mm->context != NO_CONTEXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) cpumask_t cpu_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) cpumask_copy(&cpu_mask, mm_cpumask(mm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) if (!cpumask_empty(&cpu_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) local_ops->cache_mm(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) static void smp_flush_tlb_mm(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) if (mm->context != NO_CONTEXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) cpumask_t cpu_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) cpumask_copy(&cpu_mask, mm_cpumask(mm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (!cpumask_empty(&cpu_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
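/* We are the last user and this is our active mm: after the
 * cross-call no other CPU holds it, so shrink the mask down
 * to just the local CPU.
 */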
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) cpumask_copy(mm_cpumask(mm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) cpumask_of(smp_processor_id()));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) local_ops->tlb_mm(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) static void smp_flush_cache_range(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) struct mm_struct *mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (mm->context != NO_CONTEXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) cpumask_t cpu_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) cpumask_copy(&cpu_mask, mm_cpumask(mm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (!cpumask_empty(&cpu_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) xc3((smpfunc_t) local_ops->cache_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) (unsigned long) vma, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) local_ops->cache_range(vma, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) static void smp_flush_tlb_range(struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) struct mm_struct *mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (mm->context != NO_CONTEXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) cpumask_t cpu_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) cpumask_copy(&cpu_mask, mm_cpumask(mm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (!cpumask_empty(&cpu_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) xc3((smpfunc_t) local_ops->tlb_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) (unsigned long) vma, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) local_ops->tlb_range(vma, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) struct mm_struct *mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (mm->context != NO_CONTEXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) cpumask_t cpu_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) cpumask_copy(&cpu_mask, mm_cpumask(mm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) if (!cpumask_empty(&cpu_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) xc2((smpfunc_t) local_ops->cache_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) (unsigned long) vma, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) local_ops->cache_page(vma, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) struct mm_struct *mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) if (mm->context != NO_CONTEXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) cpumask_t cpu_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) cpumask_copy(&cpu_mask, mm_cpumask(mm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) if (!cpumask_empty(&cpu_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) xc2((smpfunc_t) local_ops->tlb_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) (unsigned long) vma, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) local_ops->tlb_page(vma, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) static void smp_flush_page_to_ram(unsigned long page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) {
/* The current theory is that callers of this have just dirtied
 * their own cache with the page's contents in kernel space, so
 * running it only on the local CPU should suffice.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) * XXX This experiment failed, research further... -DaveM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) xc1((smpfunc_t) local_ops->page_to_ram, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) local_ops->page_to_ram(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
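/* Make freshly written signal trampoline instructions visible to
 * every CPU that may run this mm.
 */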
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) cpumask_t cpu_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) cpumask_copy(&cpu_mask, mm_cpumask(mm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) if (!cpumask_empty(&cpu_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) xc2((smpfunc_t) local_ops->sig_insns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) (unsigned long) mm, insn_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) local_ops->sig_insns(mm, insn_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
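/* Cross-calling versions of the cache/TLB operations; load_mmu()
 * installs this table as sparc32_cachetlb_ops on SMP.
 */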
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) static struct sparc32_cachetlb_ops smp_cachetlb_ops __ro_after_init = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) .cache_all = smp_flush_cache_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) .cache_mm = smp_flush_cache_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) .cache_page = smp_flush_cache_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) .cache_range = smp_flush_cache_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) .tlb_all = smp_flush_tlb_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) .tlb_mm = smp_flush_tlb_mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) .tlb_page = smp_flush_tlb_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) .tlb_range = smp_flush_tlb_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) .page_to_ram = smp_flush_page_to_ram,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) .sig_insns = smp_flush_sig_insns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) .page_for_dma = smp_flush_page_for_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
/* Load up routines and constants for the sun4m, sun4d and LEON MMUs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) void __init load_mmu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) /* Functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) get_srmmu_type();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) #ifdef CONFIG_SMP
/* El switcheroo: keep the chip-local ops around for the smp_*
 * wrappers above to dispatch through.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) local_ops = sparc32_cachetlb_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
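/* sun4d and LEON use the chip-local TLB flushes directly and
 * skip the cross-calls for them.
 */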
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (poke_srmmu == poke_viking) {
/* Viking keeps its caches coherent in hardware; avoid the
 * unnecessary cross calls.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) smp_cachetlb_ops.cache_all = local_ops->cache_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) smp_cachetlb_ops.cache_range = local_ops->cache_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) smp_cachetlb_ops.cache_page = local_ops->cache_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) /* It really is const after this point. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) &smp_cachetlb_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
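/* sun4d machines have IO-units instead of an IOMMU. */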
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) if (sparc_cpu_model != sun4d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) ld_mmu_iommu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) if (sparc_cpu_model == sun4d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) sun4d_init_smp();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) else if (sparc_cpu_model == sparc_leon)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) leon_init_smp();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) sun4m_init_smp();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) }