^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * arch/sparc64/mm/init.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/extable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/memblock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/hugetlb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/initrd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/swap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/poison.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/seq_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/kprobes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/sort.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/percpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/mmzone.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <asm/head.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <asm/pgalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <asm/oplib.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <asm/iommu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <asm/mmu_context.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <asm/tlbflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <asm/dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <asm/starfire.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <asm/tlb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include <asm/spitfire.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include <asm/sections.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include <asm/tsb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include <asm/hypervisor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #include <asm/prom.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include <asm/mdesc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include <asm/cpudata.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #include <asm/setup.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #include "init_64.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) unsigned long kern_linear_pte_xor[4] __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) static unsigned long page_cache4v_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) /* A bitmap, two bits for every 256MB of physical memory. These two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) * bits determine what page size we use for kernel linear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * translations. They form an index into kern_linear_pte_xor[]. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * value in the indexed slot is XOR'd with the TLB miss virtual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) * address to form the resulting TTE. The mapping is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * 0 ==> 4MB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) * 1 ==> 256MB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * 2 ==> 2GB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) * 3 ==> 16GB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) * All sun4v chips support 256MB pages. Only SPARC-T4 and later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * support 2GB pages, and hopefully future cpus will support the 16GB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * pages as well. For slots 2 and 3, we encode a 256MB TTE xor there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * if these larger page sizes are not supported by the cpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * It would be nice to determine this from the machine description
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) * 'cpu' properties, but we need to have this table setup before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * MDESC is initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) */
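/* Illustrative sketch only (not kernel code; the real work happens in the
 * TLB-miss handlers): conceptually, for a kernel linear-mapping miss,
 *
 *	idx = linear_map_bits(vaddr);              // hypothetical helper: the two bits for this 256MB region
 *	tte = vaddr ^ kern_linear_pte_xor[idx];    // per the table above
 *
 * so a region whose bits select slot 0 yields a 4MB TTE, slot 1 a 256MB
 * TTE, and so on.
 */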
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #ifndef CONFIG_DEBUG_PAGEALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) * Space is allocated for this right after the trap table in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * arch/sparc64/kernel/head.S
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) static unsigned long cpu_pgsz_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #define MAX_BANKS 1024
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) static struct linux_prom64_registers pavail[MAX_BANKS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) static int pavail_ents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) static int cmp_p64(const void *a, const void *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) const struct linux_prom64_registers *x = a, *y = b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) if (x->phys_addr > y->phys_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) if (x->phys_addr < y->phys_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) static void __init read_obp_memory(const char *property,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) struct linux_prom64_registers *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) int *num_ents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) phandle node = prom_finddevice("/memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) int prop_size = prom_getproplen(node, property);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) int ents, ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) ents = prop_size / sizeof(struct linux_prom64_registers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) if (ents > MAX_BANKS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) prom_printf("The machine has more %s property entries than "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) "this kernel can support (%d).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) property, MAX_BANKS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) ret = prom_getproperty(node, property, (char *) regs, prop_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) if (ret == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) prom_printf("Couldn't get %s property from /memory.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) property);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) /* Sanitize what we got from the firmware, by page aligning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) * everything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) for (i = 0; i < ents; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) unsigned long base, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) base = regs[i].phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) size = regs[i].reg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) size &= PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) if (base & ~PAGE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) unsigned long new_base = PAGE_ALIGN(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) size -= new_base - base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) if ((long) size < 0L)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) size = 0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) base = new_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) if (size == 0UL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) /* If it is empty, simply get rid of it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) * This simplifies the logic of the other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) * functions that process these arrays.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) memmove(&regs[i], &regs[i + 1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) (ents - i - 1) * sizeof(regs[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) i--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) ents--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) regs[i].phys_addr = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) regs[i].reg_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) *num_ents = ents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) sort(regs, ents, sizeof(struct linux_prom64_registers),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) cmp_p64, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) /* Kernel physical address base and size in bytes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) unsigned long kern_base __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) unsigned long kern_size __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) /* Initial ramdisk setup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) extern unsigned long sparc_ramdisk_image64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) extern unsigned int sparc_ramdisk_image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) extern unsigned int sparc_ramdisk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) struct page *mem_map_zero __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) EXPORT_SYMBOL(mem_map_zero);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) unsigned long sparc64_kern_pri_context __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) unsigned long sparc64_kern_sec_context __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) int num_kernel_image_mappings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) #ifdef CONFIG_DEBUG_DCFLUSH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) atomic_t dcpage_flushes = ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) inline void flush_dcache_page_impl(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) BUG_ON(tlb_type == hypervisor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) #ifdef CONFIG_DEBUG_DCFLUSH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) atomic_inc(&dcpage_flushes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) #ifdef DCACHE_ALIASING_POSSIBLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) __flush_dcache_page(page_address(page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) ((tlb_type == spitfire) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) page_mapping_file(page) != NULL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) if (page_mapping_file(page) != NULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) tlb_type == spitfire)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) __flush_icache_page(__pa(page_address(page)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) #define PG_dcache_dirty PG_arch_1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) #define PG_dcache_cpu_shift 32UL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) #define PG_dcache_cpu_mask \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) #define dcache_dirty_cpu(page) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
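/* Atomically mark the page dirty in the D-cache of 'this_cpu': the casx
 * loop retries until both the dirty bit and the owning-cpu field have been
 * stored into page->flags without losing concurrent flag updates.
 */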
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) static inline void set_dcache_dirty(struct page *page, int this_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) unsigned long mask = this_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) unsigned long non_cpu_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) __asm__ __volatile__("1:\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) "ldx [%2], %%g7\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) "and %%g7, %1, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) "or %%g1, %0, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) "casx [%2], %%g7, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) "cmp %%g7, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) "bne,pn %%xcc, 1b\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) " nop"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) : "g1", "g7");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
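/* Clear the dcache-dirty state, but only if 'cpu' still matches the cpu
 * recorded in page->flags; if another cpu has dirtied the page since, its
 * state is left untouched.
 */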
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) unsigned long mask = (1UL << PG_dcache_dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) "1:\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) "ldx [%2], %%g7\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) "srlx %%g7, %4, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) "and %%g1, %3, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) "cmp %%g1, %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) "bne,pn %%icc, 2f\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) " andn %%g7, %1, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) "casx [%2], %%g7, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) "cmp %%g7, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) "bne,pn %%xcc, 1b\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) " nop\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) "2:"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) : "r" (cpu), "r" (mask), "r" (&page->flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) "i" (PG_dcache_cpu_mask),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) "i" (PG_dcache_cpu_shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) : "g1", "g7");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267)
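/* Insert one entry into a TSB. On cheetah_plus and sun4v the TSB is
 * referenced by physical address, hence the __pa() conversion first.
 */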
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) unsigned long tsb_addr = (unsigned long) ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) if (tlb_type == cheetah_plus || tlb_type == hypervisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) tsb_addr = __pa(tsb_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) __tsb_insert(tsb_addr, tag, pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
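/* Flush a page that was marked D-cache dirty: flush locally if this cpu
 * dirtied it, otherwise cross-call the cpu that did, then clear the
 * dirty state.
 */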
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) static void flush_dcache(unsigned long pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) page = pfn_to_page(pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) if (page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) unsigned long pg_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) pg_flags = page->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) if (pg_flags & (1UL << PG_dcache_dirty)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) PG_dcache_cpu_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) int this_cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) /* This is just to optimize away some function calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) * in the SMP case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) if (cpu == this_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) flush_dcache_page_impl(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) smp_flush_dcache_page_impl(page, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) clear_dcache_dirty_cpu(page, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) /* mm->context.lock must be held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) unsigned long tsb_hash_shift, unsigned long address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) unsigned long tte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) unsigned long tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) if (unlikely(!tsb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) tsb += ((address >> tsb_hash_shift) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) tag = (address >> 22UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) tsb_insert(tsb, tag, tte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) #ifdef CONFIG_HUGETLB_PAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) static int __init hugetlbpage_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) hugetlb_add_hstate(HPAGE_64K_SHIFT - PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) hugetlb_add_hstate(HPAGE_256MB_SHIFT - PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) hugetlb_add_hstate(HPAGE_2GB_SHIFT - PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) arch_initcall(hugetlbpage_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) static void __init pud_huge_patch(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) struct pud_huge_patch_entry *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) p = &__pud_huge_patch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) addr = p->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) *(unsigned int *)addr = p->insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) __asm__ __volatile__("flush %0" : : "r" (addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) bool __init arch_hugetlb_valid_size(unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) unsigned int hugepage_shift = ilog2(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) unsigned short hv_pgsz_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) unsigned int hv_pgsz_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) switch (hugepage_shift) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) case HPAGE_16GB_SHIFT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) hv_pgsz_mask = HV_PGSZ_MASK_16GB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) hv_pgsz_idx = HV_PGSZ_IDX_16GB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) pud_huge_patch();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) case HPAGE_2GB_SHIFT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) hv_pgsz_mask = HV_PGSZ_MASK_2GB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) hv_pgsz_idx = HV_PGSZ_IDX_2GB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) case HPAGE_256MB_SHIFT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) hv_pgsz_mask = HV_PGSZ_MASK_256MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) hv_pgsz_idx = HV_PGSZ_IDX_256MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) case HPAGE_SHIFT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) hv_pgsz_mask = HV_PGSZ_MASK_4MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) hv_pgsz_idx = HV_PGSZ_IDX_4MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) case HPAGE_64K_SHIFT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) hv_pgsz_mask = HV_PGSZ_MASK_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) hv_pgsz_idx = HV_PGSZ_IDX_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) hv_pgsz_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) #endif /* CONFIG_HUGETLB_PAGE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) struct mm_struct *mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) bool is_huge_tsb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) pte_t pte = *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) if (tlb_type != hypervisor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) unsigned long pfn = pte_pfn(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) if (pfn_valid(pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) flush_dcache(pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) if (!pte_accessible(mm, pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) spin_lock_irqsave(&mm->context.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) is_huge_tsb = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) unsigned long hugepage_size = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) if (is_vm_hugetlb_page(vma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) hugepage_size = huge_page_size(hstate_vma(vma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) if (hugepage_size >= PUD_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) unsigned long mask = 0x1ffc00000UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) /* Transfer bits [32:22] from address to resolve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) * at 4M granularity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) pte_val(pte) &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) pte_val(pte) |= (address & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) } else if (hugepage_size >= PMD_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) /* We are fabricating 8MB pages using 4MB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) * real hw pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) if (hugepage_size >= PMD_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) __update_mmu_tsb_insert(mm, MM_TSB_HUGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) REAL_HPAGE_SHIFT, address, pte_val(pte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) is_huge_tsb = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) if (!is_huge_tsb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) address, pte_val(pte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) spin_unlock_irqrestore(&mm->context.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) void flush_dcache_page(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) struct address_space *mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) int this_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) if (tlb_type == hypervisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) /* Do not bother with the expensive D-cache flush if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) * is merely the zero page. The 'bigcore' testcase in GDB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) * causes this case to run millions of times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) if (page == ZERO_PAGE(0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) this_cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) mapping = page_mapping_file(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) if (mapping && !mapping_mapped(mapping)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) int dirty = test_bit(PG_dcache_dirty, &page->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) if (dirty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) int dirty_cpu = dcache_dirty_cpu(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) if (dirty_cpu == this_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) smp_flush_dcache_page_impl(page, dirty_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) set_dcache_dirty(page, this_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) /* We could delay the flush for the !page_mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) * case too. But that case is for exec env/arg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) * pages and those are 99% certain to get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) * faulted into the tlb (and thus flushed) anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) flush_dcache_page_impl(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) EXPORT_SYMBOL(flush_dcache_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) void __kprobes flush_icache_range(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) if (tlb_type == spitfire) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) unsigned long kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) /* This code only runs on Spitfire cpus, which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) * why we can assume _PAGE_PADDR_4U.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) unsigned long paddr, mask = _PAGE_PADDR_4U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) if (kaddr >= PAGE_OFFSET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) paddr = kaddr & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) pte_t *ptep = virt_to_kpte(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) paddr = pte_val(*ptep) & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) __flush_icache_page(paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) EXPORT_SYMBOL(flush_icache_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) void mmu_info(struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) static const char *pgsz_strings[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) "8K", "64K", "512K", "4MB", "32MB",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) "256MB", "2GB", "16GB",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) int i, printed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) if (tlb_type == cheetah)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) seq_printf(m, "MMU Type\t: Cheetah\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) else if (tlb_type == cheetah_plus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) seq_printf(m, "MMU Type\t: Cheetah+\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) else if (tlb_type == spitfire)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) seq_printf(m, "MMU Type\t: Spitfire\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) else if (tlb_type == hypervisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) seq_printf(m, "MMU Type\t: ???\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) seq_printf(m, "MMU PGSZs\t: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) printed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) if (cpu_pgsz_mask & (1UL << i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) seq_printf(m, "%s%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) printed ? "," : "", pgsz_strings[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) printed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) seq_putc(m, '\n');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) #ifdef CONFIG_DEBUG_DCFLUSH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) seq_printf(m, "DCPageFlushes\t: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) atomic_read(&dcpage_flushes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) seq_printf(m, "DCPageFlushesXC\t: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) atomic_read(&dcpage_flushes_xcall));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) #endif /* CONFIG_DEBUG_DCFLUSH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) struct linux_prom_translation prom_trans[512] __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) unsigned int prom_trans_ents __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) unsigned long kern_locked_tte_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) /* The OBP translations are saved based on the 8K page size, since OBP can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) * use a mixture of page sizes. Misses to the LOW_OBP_ADDRESS ->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) * HI_OBP_ADDRESS range are handled in ktlb.S.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) static inline int in_obp_range(unsigned long vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) return (vaddr >= LOW_OBP_ADDRESS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) vaddr < HI_OBP_ADDRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) static int cmp_ptrans(const void *a, const void *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) const struct linux_prom_translation *x = a, *y = b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) if (x->virt > y->virt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) if (x->virt < y->virt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) /* Read OBP translations property into 'prom_trans[]'. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) static void __init read_obp_translations(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) int n, node, ents, first, last, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) node = prom_finddevice("/virtual-memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) n = prom_getproplen(node, "translations");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) if (unlikely(n == 0 || n == -1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) prom_printf("prom_mappings: Couldn't get size.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) if (unlikely(n > sizeof(prom_trans))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) prom_printf("prom_mappings: Size %d is too big.\n", n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) if ((n = prom_getproperty(node, "translations",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) (char *)&prom_trans[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) sizeof(prom_trans))) == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) prom_printf("prom_mappings: Couldn't get property.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) n = n / sizeof(struct linux_prom_translation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) ents = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) sort(prom_trans, ents, sizeof(struct linux_prom_translation),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) cmp_ptrans, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) /* Now kick out all the non-OBP entries. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) for (i = 0; i < ents; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) if (in_obp_range(prom_trans[i].virt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) first = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) for (; i < ents; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) if (!in_obp_range(prom_trans[i].virt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) last = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
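/* Slide the OBP entries down to the front of prom_trans[] and zero
 * out whatever is left behind them.
 */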
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) for (i = 0; i < (last - first); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) struct linux_prom_translation *src = &prom_trans[i + first];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) struct linux_prom_translation *dest = &prom_trans[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) *dest = *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) for (; i < ents; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) struct linux_prom_translation *dest = &prom_trans[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) dest->virt = dest->size = dest->data = 0x0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) prom_trans_ents = last - first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) if (tlb_type == spitfire) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) /* Clear diag TTE bits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) for (i = 0; i < prom_trans_ents; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) prom_trans[i].data &= ~0x0003fe0000000000UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) /* Force execute bit on. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) for (i = 0; i < prom_trans_ents; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) prom_trans[i].data |= (tlb_type == hypervisor ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) _PAGE_EXEC_4V : _PAGE_EXEC_4U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) static void __init hypervisor_tlb_lock(unsigned long vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) unsigned long pte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) unsigned long mmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) "errors with %lx\n", vaddr, 0, pte, mmu, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) static unsigned long kern_large_tte(unsigned long paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663)
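/* Lock the kernel image mappings (num_kernel_image_mappings 4MB entries
 * starting at KERNBASE) into the TLB: through the hypervisor on sun4v,
 * through OBP's locked dTLB/iTLB entries otherwise.
 */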
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) static void __init remap_kernel(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) unsigned long phys_page, tte_vaddr, tte_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) int i, tlb_ent = sparc64_highest_locked_tlbent();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) tte_vaddr = (unsigned long) KERNBASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) tte_data = kern_large_tte(phys_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) kern_locked_tte_data = tte_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) /* Now lock us into the TLBs via Hypervisor or OBP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) if (tlb_type == hypervisor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) for (i = 0; i < num_kernel_image_mappings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) tte_vaddr += 0x400000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) tte_data += 0x400000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) for (i = 0; i < num_kernel_image_mappings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) tte_vaddr += 0x400000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) tte_data += 0x400000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) if (tlb_type == cheetah_plus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) CTX_CHEETAH_PLUS_NUC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) static void __init inherit_prom_mappings(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) /* Now fixup OBP's idea about where we really are mapped. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) printk("Remapping the kernel... ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) remap_kernel();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) printk("done.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) void prom_world(int enter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) if (!enter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) set_fs(get_fs());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) __asm__ __volatile__("flushw");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
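/* Spitfire: clear D-cache tags directly; the tag index is (va & 0x3fe0),
 * so after 512 lines the whole cache has been covered and we can stop.
 * Cheetah/Cheetah+: invalidate by physical address via ASI_DCACHE_INVALIDATE.
 */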
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) void __flush_dcache_range(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) unsigned long va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) if (tlb_type == spitfire) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) int n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) for (va = start; va < end; va += 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) if (++n >= 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) start = __pa(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) end = __pa(end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) for (va = start; va < end; va += 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) : "r" (va),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) "i" (ASI_DCACHE_INVALIDATE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) EXPORT_SYMBOL(__flush_dcache_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) /* get_new_mmu_context() uses "cache + 1". */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) DEFINE_SPINLOCK(ctx_alloc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) unsigned long tlb_context_cache = CTX_FIRST_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) #define MAX_CTX_NR (1UL << CTX_NR_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
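/* Called under ctx_alloc_lock when the context-number space is exhausted:
 * bump the context version, reset the allocation bitmap (keeping context 0
 * for the kernel), and re-stamp the mms currently loaded as a secondary
 * context somewhere so that they remain valid under the new version.
 */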
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) static void mmu_context_wrap(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) unsigned long new_ver, new_ctx, old_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) struct mm_struct *mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) /* Reserve kernel context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) set_bit(0, mmu_context_bmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (unlikely(new_ver == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) new_ver = CTX_FIRST_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) tlb_context_cache = new_ver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * Make sure that any new mm that is added into per_cpu_secondary_mm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * is going to go through the get_new_mmu_context() path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * Update the context version to the current one on those CPUs that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * had valid secondary contexts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) * If a new mm is stored after we took this mm from the array,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * it will go into get_new_mmu_context() path, because we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * already bumped the version in tlb_context_cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) mm = per_cpu(per_cpu_secondary_mm, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (unlikely(!mm || mm == &init_mm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) old_ctx = mm->context.sparc64_ctx_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) mm->context.sparc64_ctx_val = new_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) /* Caller does TLB context flushing on local CPU if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * The caller also ensures that CTX_VALID(mm->context) is false.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * We must be careful about boundary cases so that we never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * let the user have CTX 0 (nucleus) and never use a CTX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * version of zero (otherwise NO_CONTEXT would not be caught
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * by the version mis-match tests in mmu_context.h).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * Always invoked with interrupts disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) void get_new_mmu_context(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) unsigned long ctx, new_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) unsigned long orig_pgsz_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) spin_lock(&ctx_alloc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /* wrap might have happened, test again if our context became valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (unlikely(CTX_VALID(mm->context)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
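/* Scan the context bitmap for a free context number, starting just
 * past the most recently allocated one.  If nothing is free above that
 * point, rescan from bit 1 (context 0 is the nucleus); only when both
 * passes fail do we wrap the version and retry.
 */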
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (new_ctx >= (1 << CTX_NR_BITS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (new_ctx >= ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) mmu_context_wrap();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (mm->context.sparc64_ctx_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) cpumask_clear(mm_cpumask(mm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) tlb_context_cache = new_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) spin_unlock(&ctx_alloc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) static int numa_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) static int numa_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) static int __init early_numa(char *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (strstr(p, "off"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) numa_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (strstr(p, "debug"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) numa_debug = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) early_param("numa", early_numa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) #define numadbg(f, a...) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) do { if (numa_debug) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) printk(KERN_INFO f, ## a); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) static void __init find_ramdisk(unsigned long phys_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) #ifdef CONFIG_BLK_DEV_INITRD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (sparc_ramdisk_image || sparc_ramdisk_image64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) unsigned long ramdisk_image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) /* Older versions of the bootloader only supported a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * 32-bit physical address for the ramdisk image
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * location, stored at sparc_ramdisk_image. Newer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * SILO versions set sparc_ramdisk_image to zero and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * provide a full 64-bit physical address at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * sparc_ramdisk_image64.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) ramdisk_image = sparc_ramdisk_image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (!ramdisk_image)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) ramdisk_image = sparc_ramdisk_image64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /* Another bootloader quirk. The bootloader normalizes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * the physical address to KERNBASE, so we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * factor that back out and add in the lowest valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * physical page address to get the true physical address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) ramdisk_image -= KERNBASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) ramdisk_image += phys_base;
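/* For example (hypothetical values): if the bootloader reports the
 * image at KERNBASE + 0x800000 and phys_base is 0x20000000, the true
 * physical address becomes 0x20000000 + 0x800000 = 0x20800000.
 */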
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) ramdisk_image, sparc_ramdisk_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) initrd_start = ramdisk_image;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) initrd_end = ramdisk_image + sparc_ramdisk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) memblock_reserve(initrd_start, sparc_ramdisk_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) initrd_start += PAGE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) initrd_end += PAGE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct node_mem_mask {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) unsigned long mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) unsigned long match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) static struct node_mem_mask node_masks[MAX_NUMNODES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) static int num_node_masks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) #ifdef CONFIG_NEED_MULTIPLE_NODES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct mdesc_mlgroup {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) u64 node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) u64 latency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) u64 match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) u64 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) static struct mdesc_mlgroup *mlgroups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) static int num_mlgroups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) int numa_cpu_lookup_table[NR_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) struct mdesc_mblock {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) u64 base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) u64 size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) u64 offset; /* RA-to-PA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) static struct mdesc_mblock *mblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) static int num_mblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) static struct mdesc_mblock * __init addr_to_mblock(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) struct mdesc_mblock *m = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) for (i = 0; i < num_mblocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) m = &mblocks[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (addr >= m->base &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) addr < (m->base + m->size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
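/* Walk [start, end) one page at a time, matching each page against the
 * node_masks[] rules.  Return the address at which the node assignment
 * changes (or 'end' if the whole range lies in one node), and report
 * the node of the leading run through *nid.
 */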
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) int prev_nid, new_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) prev_nid = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) for ( ; start < end; start += PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) for (new_nid = 0; new_nid < num_node_masks; new_nid++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) struct node_mem_mask *p = &node_masks[new_nid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if ((start & p->mask) == p->match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (prev_nid == NUMA_NO_NODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) prev_nid = new_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (new_nid == num_node_masks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) prev_nid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) WARN_ONCE(1, "addr[%Lx] doesn't match a NUMA node rule. Some memory will be owned by node 0.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (prev_nid != new_nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) *nid = prev_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) return start > end ? end : start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) u64 ret_end, pa_start, m_mask, m_match, m_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) struct mdesc_mblock *mblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) int _nid, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (tlb_type != hypervisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return memblock_nid_range_sun4u(start, end, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) mblock = addr_to_mblock(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (!mblock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) WARN_ONCE(1, "memblock_nid_range: Can't find mblock addr[%Lx]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) _nid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) ret_end = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) pa_start = start + mblock->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) m_match = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) m_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) for (_nid = 0; _nid < num_node_masks; _nid++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct node_mem_mask *const m = &node_masks[_nid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if ((pa_start & m->mask) == m->match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) m_match = m->match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) m_mask = m->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (num_node_masks == _nid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) /* We could not find a NUMA group, so default to 0, but let's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * search for the latency group so we can calculate the correct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * end address to return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) _nid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) for (i = 0; i < num_mlgroups; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct mdesc_mlgroup *const m = &mlgroups[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if ((pa_start & m->mask) == m->match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) m_match = m->match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) m_mask = m->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (i == num_mlgroups) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) WARN_ONCE(1, "memblock_nid_range: Can't find latency group addr[%Lx]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) ret_end = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * Each latency group has a match and a mask, and each memory block has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * an offset. An address belongs to a latency group if it satisfies the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * following formula: ((addr + offset) & mask) == match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * It is, however, slow to check every single page against a particular
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * latency group. As an optimization, we calculate the end value using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * bit arithmetic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) */
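/* For example, with hypothetical values mask = 0xf80000000 (bits
 * 31..35), match = 0x200000000 and offset = 0: __ffs(mask) = 31, so a
 * matching chunk spans 1UL << 31 bytes (2GB), and fls64(mask) = 36, so
 * bits 36 and above of pa_start carry over unchanged.  With
 * pa_start = 0x1234500000 this gives
 * m_end = 0x200000000 + 0x80000000 + 0x1000000000 = 0x1280000000,
 * i.e. the end of the 2GB granule that pa_start falls into.
 */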
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) m_end = m_match + (1ul << __ffs(m_mask)) - mblock->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) m_end += pa_start & ~((1ul << fls64(m_mask)) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) ret_end = m_end > end ? end : m_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) *nid = _nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return ret_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) /* This must be invoked after performing all of the necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * memblock_set_node() calls for 'nid'. We need to be able to get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * correct data from get_pfn_range_for_nid().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) static void __init allocate_node_data(int nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) struct pglist_data *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) unsigned long start_pfn, end_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) #ifdef CONFIG_NEED_MULTIPLE_NODES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) SMP_CACHE_BYTES, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (!NODE_DATA(nid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) NODE_DATA(nid)->node_id = nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) p = NODE_DATA(nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) p->node_start_pfn = start_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) p->node_spanned_pages = end_pfn - start_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) static void init_node_masks_nonnuma(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) #ifdef CONFIG_NEED_MULTIPLE_NODES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) numadbg("Initializing tables for non-numa.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) node_masks[0].mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) node_masks[0].match = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) num_node_masks = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) #ifdef CONFIG_NEED_MULTIPLE_NODES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) for (i = 0; i < NR_CPUS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) numa_cpu_lookup_table[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) cpumask_setall(&numa_cpumask_lookup_table[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) #ifdef CONFIG_NEED_MULTIPLE_NODES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) struct pglist_data *node_data[MAX_NUMNODES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) EXPORT_SYMBOL(numa_cpu_lookup_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) EXPORT_SYMBOL(numa_cpumask_lookup_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) EXPORT_SYMBOL(node_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) u32 cfg_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) u64 arc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) u64 target = mdesc_arc_target(md, arc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) const u64 *val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) val = mdesc_get_property(md, target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) "cfg-handle", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (val && *val == cfg_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
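/* Pick the lowest-latency "pio-latency-group" reachable from 'grp' and
 * check whether that PIO node leads to the given cfg-handle.
 */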
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) u32 cfg_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) u64 arc, candidate, best_latency = ~(u64)0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) candidate = MDESC_NODE_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) u64 target = mdesc_arc_target(md, arc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) const char *name = mdesc_node_name(md, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) const u64 *val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (strcmp(name, "pio-latency-group"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) val = mdesc_get_property(md, target, "latency", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (!val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (*val < best_latency) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) candidate = target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) best_latency = *val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (candidate == MDESC_NODE_NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) int of_node_to_nid(struct device_node *dp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) const struct linux_prom64_registers *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) struct mdesc_handle *md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) u32 cfg_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) int count, nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) u64 grp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) /* This is the right thing to do on currently supported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * SUN4U NUMA platforms as well, as the PCI controller does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * not sit behind any particular memory controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (!mlgroups)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) regs = of_get_property(dp, "reg", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (!regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) md = mdesc_grab();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) nid = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) mdesc_for_each_node_by_name(md, grp, "group") {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) nid = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) mdesc_release(md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) static void __init add_node_ranges(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) phys_addr_t start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) unsigned long prev_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) u64 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
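/* memblock_set_node() below can grow the memblock.memory region array;
 * if that happens, the ranges being iterated over may have been
 * reallocated.  Remember the current array size and restart the whole
 * walk whenever it changes.
 */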
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) memblock_resized:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) prev_max = memblock.memory.max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) for_each_mem_range(i, &start, &end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) while (start < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) unsigned long this_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) int nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) this_end = memblock_nid_range(start, end, &nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) numadbg("Setting memblock NUMA node nid[%d] "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) "start[%llx] end[%lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) nid, start, this_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) memblock_set_node(start, this_end - start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) &memblock.memory, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (memblock.memory.max != prev_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) goto memblock_resized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) start = this_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static int __init grab_mlgroups(struct mdesc_handle *md)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) unsigned long paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) u64 node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) mdesc_for_each_node_by_name(md, node, "memory-latency-group")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mlgroup),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (!paddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) mlgroups = __va(paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) num_mlgroups = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) struct mdesc_mlgroup *m = &mlgroups[count++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) const u64 *val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) m->node = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) val = mdesc_get_property(md, node, "latency", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) m->latency = *val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) val = mdesc_get_property(md, node, "address-match", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) m->match = *val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) val = mdesc_get_property(md, node, "address-mask", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) m->mask = *val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) "match[%llx] mask[%llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) count - 1, m->node, m->latency, m->match, m->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) static int __init grab_mblocks(struct mdesc_handle *md)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) unsigned long paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) u64 node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) mdesc_for_each_node_by_name(md, node, "mblock")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mblock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (!paddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) mblocks = __va(paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) num_mblocks = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) mdesc_for_each_node_by_name(md, node, "mblock") {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) struct mdesc_mblock *m = &mblocks[count++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) const u64 *val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) val = mdesc_get_property(md, node, "base", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) m->base = *val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) val = mdesc_get_property(md, node, "size", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) m->size = *val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) val = mdesc_get_property(md, node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) "address-congruence-offset", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) /* The address-congruence-offset property is optional.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * Explicitly zero it to identify this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) m->offset = *val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) m->offset = 0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) count - 1, m->base, m->size, m->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) u64 grp, cpumask_t *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) u64 arc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) cpumask_clear(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) u64 target = mdesc_arc_target(md, arc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) const char *name = mdesc_node_name(md, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) const u64 *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (strcmp(name, "cpu"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) id = mdesc_get_property(md, target, "id", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) if (*id < nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) cpumask_set_cpu(*id, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) for (i = 0; i < num_mlgroups; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) struct mdesc_mlgroup *m = &mlgroups[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (m->node == node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) return m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) int __node_distance(int from, int to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) pr_warn("Returning default NUMA distance value for %d->%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) from, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) return numa_latency[from][to];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) EXPORT_SYMBOL(__node_distance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) for (i = 0; i < MAX_NUMNODES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) struct node_mem_mask *n = &node_masks[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if ((grp->mask == n->mask) && (grp->match == n->match))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) static void __init find_numa_latencies_for_group(struct mdesc_handle *md,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) u64 grp, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) u64 arc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) int tnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) u64 target = mdesc_arc_target(md, arc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) struct mdesc_mlgroup *m = find_mlgroup(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (!m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) tnode = find_best_numa_node_for_mlgroup(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) if (tnode == MAX_NUMNODES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) numa_latency[index][tnode] = m->latency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct mdesc_mlgroup *candidate = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) u64 arc, best_latency = ~(u64)0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) struct node_mem_mask *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) u64 target = mdesc_arc_target(md, arc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) struct mdesc_mlgroup *m = find_mlgroup(target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (!m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) if (m->latency < best_latency) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) candidate = m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) best_latency = m->latency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (!candidate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) if (num_node_masks != index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) printk(KERN_ERR "Inconsistent NUMA state, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) "index[%d] != num_node_masks[%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) index, num_node_masks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) n = &node_masks[num_node_masks++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) n->mask = candidate->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) n->match = candidate->match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) numadbg("NUMA NODE[%d]: mask[%lx] match[%lx] (latency[%llx])\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) index, n->mask, n->match, candidate->latency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) cpumask_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) numa_parse_mdesc_group_cpus(md, grp, &mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) for_each_cpu(cpu, &mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) numa_cpu_lookup_table[cpu] = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (numa_debug) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) for_each_cpu(cpu, &mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) printk("%d ", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) printk("]\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) return numa_attach_mlgroup(md, grp, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) static int __init numa_parse_mdesc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) struct mdesc_handle *md = mdesc_grab();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) int i, j, err, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) u64 node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (node == MDESC_NODE_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) mdesc_release(md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) err = grab_mblocks(md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) err = grab_mlgroups(md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) mdesc_for_each_node_by_name(md, node, "group") {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) err = numa_parse_mdesc_group(md, node, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) mdesc_for_each_node_by_name(md, node, "group") {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) find_numa_latencies_for_group(md, node, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) /* Normalize the NUMA latency matrix according to the ACPI SLIT spec. */
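/* For example (hypothetical latencies): if a node's local latency is
 * 0x40 and its latency to some remote node is 0x80, the remote entry
 * is scaled to 0x80 * LOCAL_DISTANCE / 0x40, i.e. twice the SLIT
 * local distance.
 */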
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) for (i = 0; i < MAX_NUMNODES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) u64 self_latency = numa_latency[i][i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) for (j = 0; j < MAX_NUMNODES; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) numa_latency[i][j] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) (numa_latency[i][j] * LOCAL_DISTANCE) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) self_latency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) add_node_ranges();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) for (i = 0; i < num_node_masks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) allocate_node_data(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) node_set_online(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) mdesc_release(md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) static int __init numa_parse_jbus(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) unsigned long cpu, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) /* NUMA node id is encoded in bits 36 and higher, and there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * a 1-to-1 mapping from CPU ID to NUMA node ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) */
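/* For example, assuming present CPUs are numbered contiguously, CPU 2
 * becomes node 2 with match = 2UL << 36 = 0x2000000000, so any physical
 * address whose bits 36 and up equal 2 is local to that node.
 */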
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) for_each_present_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) numa_cpu_lookup_table[cpu] = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) node_masks[index].mask = ~((1UL << 36UL) - 1UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) node_masks[index].match = cpu << 36UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) num_node_masks = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) add_node_ranges();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) for (index = 0; index < num_node_masks; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) allocate_node_data(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) node_set_online(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) static int __init numa_parse_sun4u(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (tlb_type == cheetah || tlb_type == cheetah_plus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) unsigned long ver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) __asm__ ("rdpr %%ver, %0" : "=r" (ver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if ((ver >> 32UL) == __JALAPENO_ID ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) (ver >> 32UL) == __SERRANO_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) return numa_parse_jbus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) static int __init bootmem_init_numa(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) int err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) numadbg("bootmem_init_numa()\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) /* Some sane defaults for numa latency values */
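/* (LOCAL_DISTANCE is 10 and REMOTE_DISTANCE is 20, matching the ACPI
 * SLIT convention of normalizing the local distance to 10.)
 */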
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) for (i = 0; i < MAX_NUMNODES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) for (j = 0; j < MAX_NUMNODES; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) numa_latency[i][j] = (i == j) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) LOCAL_DISTANCE : REMOTE_DISTANCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) if (numa_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (tlb_type == hypervisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) err = numa_parse_mdesc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) err = numa_parse_sun4u();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) static int bootmem_init_numa(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) static void __init bootmem_init_nonnuma(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) unsigned long top_of_ram = memblock_end_of_DRAM();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) unsigned long total_ram = memblock_phys_mem_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) numadbg("bootmem_init_nonnuma()\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) top_of_ram, total_ram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) printk(KERN_INFO "Memory hole size: %ldMB\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) (top_of_ram - total_ram) >> 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) init_node_masks_nonnuma();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) allocate_node_data(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) node_set_online(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) static unsigned long __init bootmem_init(unsigned long phys_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) unsigned long end_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) max_pfn = max_low_pfn = end_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) min_low_pfn = (phys_base >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) if (bootmem_init_numa() < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) bootmem_init_nonnuma();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) /* Dump memblock with node info. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) memblock_dump_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) /* XXX cpu notifier XXX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) sparse_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) return end_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) static int pall_ents __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) static unsigned long max_phys_bits = 40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) bool kern_addr_valid(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) pgd_t *pgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) p4d_t *p4d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) pud_t *pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) pmd_t *pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) pte_t *pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) if ((long)addr < 0L) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) unsigned long pa = __pa(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if ((pa >> max_phys_bits) != 0UL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) return pfn_valid(pa >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) if (addr >= (unsigned long) KERNBASE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) addr < (unsigned long)&_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) pgd = pgd_offset_k(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) if (pgd_none(*pgd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) p4d = p4d_offset(pgd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) if (p4d_none(*p4d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) pud = pud_offset(p4d, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if (pud_none(*pud))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (pud_large(*pud))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) return pfn_valid(pud_pfn(*pud));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) pmd = pmd_offset(pud, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) if (pmd_none(*pmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (pmd_large(*pmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) return pfn_valid(pmd_pfn(*pmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) pte = pte_offset_kernel(pmd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) if (pte_none(*pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) return pfn_valid(pte_pfn(*pte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) EXPORT_SYMBOL(kern_addr_valid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
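/* Map a chunk of the linear region with huge PUD entries.  The PUD
 * value is "vstart ^ kern_linear_pte_xor[n]": XOR-ing with PAGE_OFFSET
 * recovers the physical address while the remaining bits of the
 * constant supply the TTE valid/size/protection bits (see
 * sun4v_linear_pte_xor_finalize()).  A 16GB aligned range of at least
 * 16GB is filled with kern_linear_pte_xor[3] across 16GB worth of
 * PUDs, otherwise a single 8GB PUD gets kern_linear_pte_xor[2].
 * Returns the next unmapped virtual address.
 */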
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) unsigned long vend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) pud_t *pud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) const unsigned long mask16gb = (1UL << 34) - 1UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) u64 pte_val = vstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) /* Each PUD is 8GB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if ((vstart & mask16gb) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) (vend - vstart <= mask16gb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) pte_val ^= kern_linear_pte_xor[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) return vstart + PUD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) pte_val ^= kern_linear_pte_xor[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) pte_val |= _PAGE_PUD_HUGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) vend = vstart + mask16gb + 1UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) while (vstart < vend) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) pud_val(*pud) = pte_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) pte_val += PUD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) vstart += PUD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) pud++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) return vstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) bool guard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
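/* PMD level analogue of kernel_map_hugepud().  Depending on the
 * alignment and length of the range, a single 8MB PMD gets the base
 * linear TTE (kern_linear_pte_xor[0]), or 256MB / 2GB worth of PMDs
 * get the 256MB ([1]) or 2GB ([2]) TTE.  Returns the next unmapped
 * virtual address.
 */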
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) unsigned long vend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) pmd_t *pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) const unsigned long mask256mb = (1UL << 28) - 1UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) const unsigned long mask2gb = (1UL << 31) - 1UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) u64 pte_val = vstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) /* Each PMD is 8MB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) if ((vstart & mask256mb) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) (vend - vstart <= mask256mb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) pte_val ^= kern_linear_pte_xor[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) return vstart + PMD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) if ((vstart & mask2gb) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) (vend - vstart <= mask2gb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) pte_val ^= kern_linear_pte_xor[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) pte_val |= _PAGE_PMD_HUGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) vend = vstart + mask256mb + 1UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) pte_val ^= kern_linear_pte_xor[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) pte_val |= _PAGE_PMD_HUGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) vend = vstart + mask2gb + 1UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) while (vstart < vend) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) pmd_val(*pmd) = pte_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) pte_val += PMD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) vstart += PMD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) pmd++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) return vstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) bool guard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
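/* Establish kernel linear mappings for the physical range
 * [pstart, pend).  Page table pages are allocated from memblock as
 * needed, and huge PUD/PMD mappings are used when @use_huge is set
 * and alignment allows.  Returns the number of bytes allocated for
 * page tables.
 */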
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) static unsigned long __ref kernel_map_range(unsigned long pstart,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) unsigned long pend, pgprot_t prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) bool use_huge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) unsigned long vstart = PAGE_OFFSET + pstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) unsigned long vend = PAGE_OFFSET + pend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) unsigned long alloc_bytes = 0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) vstart, vend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) while (vstart < vend) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) unsigned long this_end, paddr = __pa(vstart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) pgd_t *pgd = pgd_offset_k(vstart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) p4d_t *p4d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) pud_t *pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) pmd_t *pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) pte_t *pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) if (pgd_none(*pgd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) pud_t *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) alloc_bytes += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) pgd_populate(&init_mm, pgd, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) p4d = p4d_offset(pgd, vstart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) if (p4d_none(*p4d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) pud_t *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) alloc_bytes += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) p4d_populate(&init_mm, p4d, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) pud = pud_offset(p4d, vstart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) if (pud_none(*pud)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) pmd_t *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) vstart = kernel_map_hugepud(vstart, vend, pud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) alloc_bytes += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) pud_populate(&init_mm, pud, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) pmd = pmd_offset(pud, vstart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) if (pmd_none(*pmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) pte_t *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) vstart = kernel_map_hugepmd(vstart, vend, pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) goto err_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) alloc_bytes += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) pmd_populate_kernel(&init_mm, pmd, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) pte = pte_offset_kernel(pmd, vstart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) this_end = (vstart + PMD_SIZE) & PMD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (this_end > vend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) this_end = vend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) while (vstart < this_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) pte_val(*pte) = (paddr | pgprot_val(prot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) vstart += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) paddr += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) pte++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) return alloc_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) err_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) static void __init flush_all_kernel_tsbs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) struct tsb *ent = &swapper_tsb[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) ent->tag = (1UL << TSB_TAG_INVALID_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) #ifndef CONFIG_DEBUG_PAGEALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) struct tsb *ent = &swapper_4m_tsb[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) ent->tag = (1UL << TSB_TAG_INVALID_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) extern unsigned int kvmap_linear_patch[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
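/* Build kernel page tables covering every physical memory bank in
 * pall[], then patch the instruction at kvmap_linear_patch (a site in
 * the kernel TLB miss path) to a nop -- 0x01000000 is the sparc nop
 * encoding -- and flush the kernel TSBs and the TLB so that no stale
 * translations survive.
 */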
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) static void __init kernel_physical_mapping_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) unsigned long i, mem_alloced = 0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) bool use_huge = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) #ifdef CONFIG_DEBUG_PAGEALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) use_huge = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) for (i = 0; i < pall_ents; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) unsigned long phys_start, phys_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) phys_start = pall[i].phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) phys_end = phys_start + pall[i].reg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) mem_alloced += kernel_map_range(phys_start, phys_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) PAGE_KERNEL, use_huge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) printk("Allocated %ld bytes for kernel page tables.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) mem_alloced);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) kvmap_linear_patch[0] = 0x01000000; /* nop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) flushi(&kvmap_linear_patch[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) flush_all_kernel_tsbs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) __flush_tlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) #ifdef CONFIG_DEBUG_PAGEALLOC
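/* DEBUG_PAGEALLOC hook: map or unmap @numpages pages of the linear
 * mapping at base page granularity (huge pages are never used here),
 * then flush the kernel TSB and the local TLB for the affected range.
 */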
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) void __kernel_map_pages(struct page *page, int numpages, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) kernel_map_range(phys_start, phys_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) (enable ? PAGE_KERNEL : __pgprot(0)), false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) PAGE_OFFSET + phys_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	/* We should perform an IPI and flush all TLBs here,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	 * but that can deadlock, so we only flush the current cpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) PAGE_OFFSET + phys_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
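/* Find a physically contiguous span of available memory at least
 * @size bytes long, for use as an E-cache flush area.  Returns its
 * physical address, or ~0UL when no such span exists.
 */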
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) unsigned long __init find_ecache_flush_span(unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) for (i = 0; i < pavail_ents; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) if (pavail[i].reg_size >= size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) return pavail[i].phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) return ~0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) unsigned long PAGE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) EXPORT_SYMBOL(PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) unsigned long VMALLOC_END = 0x0000010000000000UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) EXPORT_SYMBOL(VMALLOC_END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) unsigned long sparc64_va_hole_top = 0xfffff80000000000UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
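/* Size the virtual address hole and max_phys_bits according to the
 * cpu type, then derive the linear mapping base and the end of the
 * vmalloc area.  PAGE_OFFSET is the bottom of the upper (negative)
 * half of the address space, and VMALLOC_END sits at 3/4 of the lower
 * half: for example, on a 48-bit VA chip sparc64_va_hole_bottom is
 * 0x0000800000000000, so VMALLOC_END becomes (1/2 + 1/4) of that,
 * i.e. 0x0000600000000000.
 */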
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) static void __init setup_page_offset(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) if (tlb_type == cheetah || tlb_type == cheetah_plus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) /* Cheetah/Panther support a full 64-bit virtual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) * address, so we can use all that our page tables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) * support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) sparc64_va_hole_top = 0xfff0000000000000UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) sparc64_va_hole_bottom = 0x0010000000000000UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) max_phys_bits = 42;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) } else if (tlb_type == hypervisor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) switch (sun4v_chip_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) case SUN4V_CHIP_NIAGARA1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) case SUN4V_CHIP_NIAGARA2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) /* T1 and T2 support 48-bit virtual addresses. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) sparc64_va_hole_top = 0xffff800000000000UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) sparc64_va_hole_bottom = 0x0000800000000000UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) max_phys_bits = 39;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) case SUN4V_CHIP_NIAGARA3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) /* T3 supports 48-bit virtual addresses. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) sparc64_va_hole_top = 0xffff800000000000UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) sparc64_va_hole_bottom = 0x0000800000000000UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) max_phys_bits = 43;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) case SUN4V_CHIP_NIAGARA4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) case SUN4V_CHIP_NIAGARA5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) case SUN4V_CHIP_SPARC64X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) case SUN4V_CHIP_SPARC_M6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) /* T4 and later support 52-bit virtual addresses. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) sparc64_va_hole_top = 0xfff8000000000000UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) sparc64_va_hole_bottom = 0x0008000000000000UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) max_phys_bits = 47;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) case SUN4V_CHIP_SPARC_M7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) case SUN4V_CHIP_SPARC_SN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) /* M7 and later support 52-bit virtual addresses. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) sparc64_va_hole_top = 0xfff8000000000000UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) sparc64_va_hole_bottom = 0x0008000000000000UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) max_phys_bits = 49;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) case SUN4V_CHIP_SPARC_M8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 			/* M8 and later support 54-bit virtual addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 			 * However, we restrict M8 and above to 53 VA bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 			 * because a 4-level page table cannot support more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 			 * than 53 VA bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) sparc64_va_hole_top = 0xfff0000000000000UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) sparc64_va_hole_bottom = 0x0010000000000000UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) max_phys_bits = 51;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) max_phys_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) PAGE_OFFSET = sparc64_va_hole_top;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) (sparc64_va_hole_bottom >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) PAGE_OFFSET, max_phys_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) VMALLOC_START, VMALLOC_END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) VMEMMAP_BASE, VMEMMAP_BASE << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
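/* Rewrite the TSB access instructions recorded in the patch tables so
 * that they use physical addressing: the quad-load patch sites get the
 * sun4v or sun4u encoding depending on tlb_type, the plain patch sites
 * get their single replacement instruction.  Each patched instruction
 * is flushed from the instruction cache.
 */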
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) static void __init tsb_phys_patch(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) struct tsb_ldquad_phys_patch_entry *pquad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) struct tsb_phys_patch_entry *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) pquad = &__tsb_ldquad_phys_patch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) while (pquad < &__tsb_ldquad_phys_patch_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) unsigned long addr = pquad->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) if (tlb_type == hypervisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) *(unsigned int *) addr = pquad->sun4v_insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) *(unsigned int *) addr = pquad->sun4u_insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) __asm__ __volatile__("flush %0"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) : "r" (addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) pquad++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) p = &__tsb_phys_patch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) while (p < &__tsb_phys_patch_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) unsigned long addr = p->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) *(unsigned int *) addr = p->insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) __asm__ __volatile__("flush %0"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) : "r" (addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) p++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) /* Don't mark as __init; we give this to the Hypervisor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) #ifndef CONFIG_DEBUG_PAGEALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) #define NUM_KTSB_DESCR 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) #define NUM_KTSB_DESCR 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) /* The swapper TSBs are loaded with a base sequence of:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) * sethi %uhi(SYMBOL), REG1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) * sethi %hi(SYMBOL), REG2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) * or REG1, %ulo(SYMBOL), REG1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) * or REG2, %lo(SYMBOL), REG2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) * sllx REG1, 32, REG1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) * or REG1, REG2, REG1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) * When we use physical addressing for the TSB accesses, we patch the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) * first four instructions in the above sequence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
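/* Each patch table entry holds the (32-bit) address of the first
 * instruction of the sequence above.  The two sethi instructions
 * receive bits 31:10 of the high and low words of the TSB physical
 * address in their 22-bit immediate fields, and the two or
 * instructions receive the remaining low 10 bits in their 13-bit
 * immediate fields.
 */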
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) unsigned long high_bits, low_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) high_bits = (pa >> 32) & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) low_bits = (pa >> 0) & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) while (start < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) unsigned int *ia = (unsigned int *)(unsigned long)*start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) __asm__ __volatile__("flush %0" : : "r" (ia));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) __asm__ __volatile__("flush %0" : : "r" (ia + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) __asm__ __volatile__("flush %0" : : "r" (ia + 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) __asm__ __volatile__("flush %0" : : "r" (ia + 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) start++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) static void ktsb_phys_patch(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) extern unsigned int __swapper_tsb_phys_patch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) extern unsigned int __swapper_tsb_phys_patch_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) unsigned long ktsb_pa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) &__swapper_tsb_phys_patch_end, ktsb_pa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) #ifndef CONFIG_DEBUG_PAGEALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) extern unsigned int __swapper_4m_tsb_phys_patch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) extern unsigned int __swapper_4m_tsb_phys_patch_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) ktsb_pa = (kern_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
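/* Fill in the hypervisor TSB descriptors for the kernel: entry 0
 * describes the base page size swapper_tsb, entry 1 (when present)
 * describes swapper_4m_tsb, which covers the larger linear mapping
 * page sizes.  sun4v_ktsb_register() later hands this array to the
 * hypervisor via sun4v_mmu_tsb_ctx0().
 */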
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) static void __init sun4v_ktsb_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) unsigned long ktsb_pa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) /* First KTSB for PAGE_SIZE mappings. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) switch (PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) case 8 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) case 64 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) case 512 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) case 4 * 1024 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) ktsb_descr[0].assoc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) ktsb_descr[0].ctx_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) ktsb_descr[0].tsb_base = ktsb_pa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) ktsb_descr[0].resv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) #ifndef CONFIG_DEBUG_PAGEALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) ktsb_pa = (kern_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) HV_PGSZ_MASK_256MB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) HV_PGSZ_MASK_2GB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) HV_PGSZ_MASK_16GB) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) cpu_pgsz_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) ktsb_descr[1].assoc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) ktsb_descr[1].ctx_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) ktsb_descr[1].tsb_base = ktsb_pa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) ktsb_descr[1].resv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) void sun4v_ktsb_register(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) unsigned long pa, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) "errors with %lx\n", pa, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) static void __init sun4u_linear_pte_xor_finalize(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) #ifndef CONFIG_DEBUG_PAGEALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) /* This is where we would add Panther support for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) * 32MB and 256MB pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
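/* Compute the final kern_linear_pte_xor[] values for the 256MB, 2GB
 * and 16GB linear mapping page sizes on sun4v.  Each value is the TTE
 * bits XOR-ed with PAGE_OFFSET, so that "vaddr ^ xor" yields a
 * complete TTE; when the cpu does not support a given page size the
 * entry falls back to the next smaller size.
 */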
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) static void __init sun4v_linear_pte_xor_finalize(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) unsigned long pagecv_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	/* Bit 9 of the TTE is no longer the CV bit on the M7 processor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	 * it instead enables MCD errors.  Do not set bit 9 on M7.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) switch (sun4v_chip_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) case SUN4V_CHIP_SPARC_M7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) case SUN4V_CHIP_SPARC_M8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) case SUN4V_CHIP_SPARC_SN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) pagecv_flag = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) pagecv_flag = _PAGE_CV_4V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) #ifndef CONFIG_DEBUG_PAGEALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) PAGE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) _PAGE_P_4V | _PAGE_W_4V);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) PAGE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) _PAGE_P_4V | _PAGE_W_4V);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) PAGE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) _PAGE_P_4V | _PAGE_W_4V);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) /* paging_init() sets up the page tables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) static unsigned long last_valid_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) static void sun4u_pgprot_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) static void sun4v_pgprot_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) /* We need to exclude reserved regions such as vmlinux and the initrd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)  * To be more precise, the initrd size could be used to compute a new,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266)  * lower limit because the initrd is freed later during initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) static void __init reduce_memory(phys_addr_t limit_ram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) limit_ram += memblock_reserved_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) memblock_enforce_memory_limit(limit_ram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)
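/* Overview of paging_init(): size the virtual address space, apply
 * the cpu specific pgprot and TSB patches, pull the firmware's memory
 * lists into memblock, compute how many locked TLB entries the kernel
 * image needs, inherit the PROM mappings and take over the trap
 * table, build the device tree, finalize the linear mapping TTEs and
 * (on sun4v) register the kernel TSBs, run bootmem_init(), build the
 * kernel page tables, then hand the zone layout to free_area_init().
 */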
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) void __init paging_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) unsigned long end_pfn, shift, phys_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) unsigned long real_end, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) setup_page_offset();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	/* These build time checks make sure that the dcache_dirty_cpu()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) * page->flags usage will work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) * When a page gets marked as dcache-dirty, we store the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) * cpu number starting at bit 32 in the page->flags. Also,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) * functions like clear_dcache_dirty_cpu use the cpu mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) * in 13-bit signed-immediate instruction fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) * Page flags must not reach into upper 32 bits that are used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) * for the cpu number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) BUILD_BUG_ON(NR_PAGEFLAGS > 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) * The bit fields placed in the high range must not reach below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) * the 32 bit boundary. Otherwise we cannot place the cpu field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) * at the 32 bit boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) BUILD_BUG_ON(NR_CPUS > 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) /* Invalidate both kernel TSBs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) #ifndef CONFIG_DEBUG_PAGEALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) /* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) * bit on M7 processor. This is a conflicting usage of the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) * bit. Enabling TTE.cv on M7 would turn on Memory Corruption
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) * Detection error on all pages and this will lead to problems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	 * later. The kernel does not run with MCD enabled and hence the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	 * rest of the required steps to fully configure memory corruption
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) * detection are not taken. We need to ensure TTE.mcde is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) * set on M7 processor. Compute the value of cacheability
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) * flag for use later taking this into consideration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) switch (sun4v_chip_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) case SUN4V_CHIP_SPARC_M7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) case SUN4V_CHIP_SPARC_M8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) case SUN4V_CHIP_SPARC_SN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) page_cache4v_flag = _PAGE_CP_4V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) page_cache4v_flag = _PAGE_CACHE_4V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) if (tlb_type == hypervisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) sun4v_pgprot_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) sun4u_pgprot_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) if (tlb_type == cheetah_plus ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) tlb_type == hypervisor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) tsb_phys_patch();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) ktsb_phys_patch();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) if (tlb_type == hypervisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) sun4v_patch_tlb_handlers();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) /* Find available physical memory...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) * Read it twice in order to work around a bug in openfirmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) * The call to grab this table itself can cause openfirmware to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) * allocate memory, which in turn can take away some space from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) * the list of available memory. Reading it twice makes sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) * we really do get the final value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) read_obp_translations();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) read_obp_memory("reg", &pall[0], &pall_ents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) read_obp_memory("available", &pavail[0], &pavail_ents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) read_obp_memory("available", &pavail[0], &pavail_ents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) phys_base = 0xffffffffffffffffUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) for (i = 0; i < pavail_ents; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) phys_base = min(phys_base, pavail[i].phys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) memblock_reserve(kern_base, kern_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) find_ramdisk(phys_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) if (cmdline_memory_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) reduce_memory(cmdline_memory_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) memblock_allow_resize();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) memblock_dump_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) set_bit(0, mmu_context_bmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) real_end = (unsigned long)_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) num_kernel_image_mappings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) /* Set kernel pgd to upper alias so physical page computations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) * work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) init_mm.pgd += ((shift) / (sizeof(pgd_t)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) inherit_prom_mappings();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) /* Ok, we can use our TLB miss and window trap handlers safely. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) setup_tba();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) __flush_tlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) prom_build_devicetree();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) of_populate_present_mask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) #ifndef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) of_fill_in_cpu_data();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) if (tlb_type == hypervisor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) sun4v_mdesc_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) mdesc_populate_present_mask(cpu_all_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) #ifndef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) mdesc_fill_in_cpu_data(cpu_all_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) sun4v_linear_pte_xor_finalize();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) sun4v_ktsb_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) sun4v_ktsb_register();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) unsigned long impl, ver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) impl = ((ver >> 32) & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) if (impl == PANTHER_IMPL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) HV_PGSZ_MASK_256MB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) sun4u_linear_pte_xor_finalize();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) /* Flush the TLBs and the 4M TSB so that the updated linear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) * pte XOR settings are realized for all mappings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) __flush_tlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) #ifndef CONFIG_DEBUG_PAGEALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) __flush_tlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) /* Setup bootmem... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) last_valid_pfn = end_pfn = bootmem_init(phys_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) kernel_physical_mapping_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) unsigned long max_zone_pfns[MAX_NR_ZONES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) max_zone_pfns[ZONE_NORMAL] = end_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) free_area_init(max_zone_pfns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) printk("Booting Linux...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) int page_in_phys_avail(unsigned long paddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) paddr &= PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) for (i = 0; i < pavail_ents; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) unsigned long start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) start = pavail[i].phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) end = start + pavail[i].reg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) if (paddr >= start && paddr < end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) if (paddr >= kern_base && paddr < (kern_base + kern_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) #ifdef CONFIG_BLK_DEV_INITRD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) if (paddr >= __pa(initrd_start) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) paddr < __pa(PAGE_ALIGN(initrd_end)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)
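/* For every online node that actually spans memory, register its
 * bootmem info pages with the core mm (used by sparsemem/memory
 * hotplug bookkeeping).
 */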
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) static void __init register_page_bootmem_info(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) #ifdef CONFIG_NEED_MULTIPLE_NODES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) for_each_online_node(i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) if (NODE_DATA(i)->node_spanned_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) register_page_bootmem_info_node(NODE_DATA(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) }
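
/* Finish memory bring-up: release all memblock memory to the buddy
 * allocator, register bootmem info, set up the reserved zero page
 * and print the memory summary.
 */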
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) void __init mem_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) high_memory = __va(last_valid_pfn << PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) memblock_free_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) * Must be done after boot memory is put on freelist, because here we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) * might set fields in deferred struct pages that have not yet been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) * initialized, and memblock_free_all() initializes all the reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) * deferred pages for us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) register_page_bootmem_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) * Set up the zero page and mark it reserved so that its page count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) * is not manipulated when the page is unmapped from user ptes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) if (mem_map_zero == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) prom_printf("paging_init: Cannot alloc zero page.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) mark_page_reserved(mem_map_zero);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) mem_init_print_info(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) if (tlb_type == cheetah || tlb_type == cheetah_plus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) cheetah_ecache_flush_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
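/* Poison the init section and, unless the physical memory map was
 * trimmed on the command line, free its pages back to the page
 * allocator.  Init addresses are KERNBASE-based, so each one is
 * rebased onto the kernel's linear mapping before virt_to_page().
 */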
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) void free_initmem(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) unsigned long addr, initend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) int do_free = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) /* If the physical memory map was trimmed by kernel command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) * line options, do not try to free this initmem at all.  The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) * kernel image could have been loaded in the trimmed-out region,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) * in which case the freeing below would free invalid page structs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) if (cmdline_memory_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) do_free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) * The init section is aligned to 8k in vmlinux.lds. Page align for >8k page sizes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) addr = PAGE_ALIGN((unsigned long)(__init_begin));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) initend = (unsigned long)(__init_end) & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) for (; addr < initend; addr += PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) unsigned long page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) page = (addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) ((unsigned long) __va(kern_base)) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) ((unsigned long) KERNBASE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) if (do_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) free_reserved_page(virt_to_page(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) pgprot_t PAGE_KERNEL __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) EXPORT_SYMBOL(PAGE_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) pgprot_t PAGE_COPY __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) pgprot_t PAGE_SHARED __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) EXPORT_SYMBOL(PAGE_SHARED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) unsigned long pg_iobits __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) unsigned long _PAGE_IE __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) EXPORT_SYMBOL(_PAGE_IE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) unsigned long _PAGE_E __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) EXPORT_SYMBOL(_PAGE_E);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) unsigned long _PAGE_CACHE __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) EXPORT_SYMBOL(_PAGE_CACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) #ifdef CONFIG_SPARSEMEM_VMEMMAP
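/* Populate the virtual memmap for [vstart, vend) at PMD granularity:
 * any PMD that is not yet valid gets a freshly allocated PMD_SIZE
 * block mapped with huge (_PAGE_PMD_HUGE) kernel PTE bits, using the
 * sun4u or sun4v encoding as appropriate.
 */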
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) int node, struct vmem_altmap *altmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) unsigned long pte_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) _PAGE_CP_4U | _PAGE_CV_4U |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) _PAGE_P_4U | _PAGE_W_4U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) if (tlb_type == hypervisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) pte_base |= _PAGE_PMD_HUGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) vstart = vstart & PMD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) vend = ALIGN(vend, PMD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) for (; vstart < vend; vstart += PMD_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) unsigned long pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) p4d_t *p4d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) pud_t *pud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) pmd_t *pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) if (!pgd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) p4d = vmemmap_p4d_populate(pgd, vstart, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) if (!p4d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) pud = vmemmap_pud_populate(p4d, vstart, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) if (!pud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) pmd = pmd_offset(pud, vstart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) pte = pmd_val(*pmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) if (!(pte & _PAGE_VALID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) void *block = vmemmap_alloc_block(PMD_SIZE, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) if (!block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) pmd_val(*pmd) = pte_base | __pa(block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) void vmemmap_free(unsigned long start, unsigned long end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) struct vmem_altmap *altmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) #endif /* CONFIG_SPARSEMEM_VMEMMAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)
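/* Fill in the sixteen protection_map[] entries from the generic page
 * protections.  The index is the low nibble of vm_flags (VM_READ,
 * VM_WRITE, VM_EXEC, VM_SHARED from bit 0 to bit 3); entries whose
 * VM_EXEC bit is clear have the execute permission masked off.
 * For example, index 0x7 (private read+write+exec) maps to page_copy
 * with execute retained, while 0xb (shared read+write) maps to
 * page_shared with execute removed.
 */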
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) static void prot_init_common(unsigned long page_none,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) unsigned long page_shared,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) unsigned long page_copy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) unsigned long page_readonly,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) unsigned long page_exec_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) PAGE_COPY = __pgprot(page_copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) PAGE_SHARED = __pgprot(page_shared);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) protection_map[0x0] = __pgprot(page_none);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) protection_map[0x4] = __pgprot(page_readonly);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) protection_map[0x5] = __pgprot(page_readonly);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) protection_map[0x6] = __pgprot(page_copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) protection_map[0x7] = __pgprot(page_copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) protection_map[0x8] = __pgprot(page_none);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) protection_map[0xc] = __pgprot(page_readonly);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) protection_map[0xd] = __pgprot(page_readonly);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) protection_map[0xe] = __pgprot(page_shared);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) protection_map[0xf] = __pgprot(page_shared);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662)
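/* Set up the page-protection and TTE constants for sun4u style MMUs:
 * the kernel pgprots, I/O PTE bits, the kernel linear-mapping PTE XOR
 * values and the user protection map.
 */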
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) static void __init sun4u_pgprot_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) unsigned long page_none, page_shared, page_copy, page_readonly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) unsigned long page_exec_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) _PAGE_CACHE_4U | _PAGE_P_4U |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) __ACCESS_BITS_4U | __DIRTY_BITS_4U |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) _PAGE_EXEC_4U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) _PAGE_CACHE_4U | _PAGE_P_4U |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) __ACCESS_BITS_4U | __DIRTY_BITS_4U |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) _PAGE_EXEC_4U | _PAGE_L_4U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) _PAGE_IE = _PAGE_IE_4U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) _PAGE_E = _PAGE_E_4U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) _PAGE_CACHE = _PAGE_CACHE_4U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) __ACCESS_BITS_4U | _PAGE_E_4U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) #ifdef CONFIG_DEBUG_PAGEALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) PAGE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) _PAGE_P_4U | _PAGE_W_4U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) for (i = 1; i < 4; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) __ACCESS_BITS_4U | _PAGE_EXEC_4U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) __ACCESS_BITS_4U | _PAGE_EXEC_4U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) page_exec_bit = _PAGE_EXEC_4U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) prot_init_common(page_none, page_shared, page_copy, page_readonly,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) page_exec_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715)
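/* As above, but using the sun4v (hypervisor) TTE layout.  Note that
 * PAGE_KERNEL_LOCKED is simply PAGE_KERNEL here, as there is no
 * lock bit in this encoding.
 */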
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) static void __init sun4v_pgprot_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) unsigned long page_none, page_shared, page_copy, page_readonly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) unsigned long page_exec_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) page_cache4v_flag | _PAGE_P_4V |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) __ACCESS_BITS_4V | __DIRTY_BITS_4V |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) _PAGE_EXEC_4V);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) PAGE_KERNEL_LOCKED = PAGE_KERNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) _PAGE_IE = _PAGE_IE_4V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) _PAGE_E = _PAGE_E_4V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) _PAGE_CACHE = page_cache4v_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) #ifdef CONFIG_DEBUG_PAGEALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) PAGE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) _PAGE_W_4V);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) for (i = 1; i < 4; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) __ACCESS_BITS_4V | _PAGE_E_4V);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) __ACCESS_BITS_4V | _PAGE_EXEC_4V);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) __ACCESS_BITS_4V | _PAGE_EXEC_4V);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) page_exec_bit = _PAGE_EXEC_4V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) prot_init_common(page_none, page_shared, page_copy, page_readonly,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) page_exec_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765)
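/* Translate a page size in bytes into the matching TTE size-field
 * encoding for the running MMU type; unrecognized sizes fall back
 * to the base 8K encoding.
 */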
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) unsigned long pte_sz_bits(unsigned long sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) if (tlb_type == hypervisor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) switch (sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) case 8 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) return _PAGE_SZ8K_4V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) case 64 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) return _PAGE_SZ64K_4V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) case 512 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) return _PAGE_SZ512K_4V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) case 4 * 1024 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) return _PAGE_SZ4MB_4V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) switch (sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) case 8 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) return _PAGE_SZ8K_4U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) case 64 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) return _PAGE_SZ64K_4U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) case 512 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) return _PAGE_SZ512K_4U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) case 4 * 1024 * 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) return _PAGE_SZ4MB_4U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794)
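/* Build a PTE for an I/O mapping: the physical page address,
 * non-cacheable protections, the bus space number shifted up by
 * 32 bits, and the size-field bits for @page_size.
 */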
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) pte_t pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) pte_val(pte) |= (((unsigned long)space) << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) pte_val(pte) |= pte_sz_bits(page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805)
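/* Compute the 4MB TTE used for the kernel's linear mapping of @paddr;
 * the sun4u encoding additionally sets the TLB lock bit (_PAGE_L_4U).
 */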
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) static unsigned long kern_large_tte(unsigned long paddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) if (tlb_type == hypervisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) page_cache4v_flag | _PAGE_P_4V |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) _PAGE_EXEC_4V | _PAGE_W_4V);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) return val | paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) /* Zap every TLB entry that is not hardware-locked. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) void __flush_tlb_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) unsigned long pstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) __asm__ __volatile__("flushw\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) "rdpr %%pstate, %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) "wrpr %0, %1, %%pstate"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) : "=r" (pstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) : "i" (PSTATE_IE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) if (tlb_type == hypervisor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) sun4v_mmu_demap_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) } else if (tlb_type == spitfire) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) for (i = 0; i < 64; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) /* Spitfire Errata #32 workaround */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) /* NOTE: Always runs on spitfire, so no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) * cheetah+ page size encodings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) __asm__ __volatile__("stxa %0, [%1] %2\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) "flush %%g6"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) : /* No outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) : "r" (0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) spitfire_put_dtlb_data(i, 0x0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) /* Spitfire Errata #32 workaround */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) /* NOTE: Always runs on spitfire, so no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) * cheetah+ page size encodings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) __asm__ __volatile__("stxa %0, [%1] %2\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) "flush %%g6"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) : /* No outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) : "r" (0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) spitfire_put_itlb_data(i, 0x0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) cheetah_flush_dtlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) cheetah_flush_itlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) __asm__ __volatile__("wrpr %0, 0, %%pstate"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) : : "r" (pstate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879)
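/* Allocate a zeroed page for a kernel PTE table.  Kernel page tables
 * skip the pgtable_pte_page constructor that the user variant below
 * requires.
 */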
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) pte_t *pte = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) if (page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) pte = (pte_t *) page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) pgtable_t pte_alloc_one(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) if (!pgtable_pte_page_ctor(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) return (pte_t *) page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) free_page((unsigned long)pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) static void __pte_free(pgtable_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) struct page *page = virt_to_page(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) pgtable_pte_page_dtor(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) void pte_free(struct mm_struct *mm, pgtable_t pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) __pte_free(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) void pgtable_free(void *table, bool is_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) if (is_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) __pte_free(table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) kmem_cache_free(pgtable_cache, table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) #ifdef CONFIG_TRANSPARENT_HUGEPAGE
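/* Preload the huge-page TSB with the translation for a valid, young
 * huge PMD.  Since 8MB huge pages are fabricated from two 4MB real
 * hardware pages, the REAL_HPAGE_SHIFT bit of the faulting address
 * selects which 4MB half is inserted.
 */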
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) pmd_t *pmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) unsigned long pte, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) struct mm_struct *mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) pmd_t entry = *pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) if (!pmd_large(entry) || !pmd_young(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) pte = pmd_val(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) if (!(pte & _PAGE_VALID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) /* We are fabricating 8MB pages using 4MB real hw pages. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) mm = vma->vm_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) spin_lock_irqsave(&mm->context.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) addr, pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) spin_unlock_irqrestore(&mm->context.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) static void context_reload(void *__data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) struct mm_struct *mm = __data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) if (mm == current->mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) load_secondary_context(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) void hugetlb_setup(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) struct mm_struct *mm = current->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) struct tsb_config *tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) if (faulthandler_disabled() || !mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) const struct exception_table_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) entry = search_exception_tables(regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) if (entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) regs->tpc = entry->fixup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) regs->tnpc = regs->tpc + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) pr_alert("Unexpected HugeTLB setup in atomic context.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) die_if_kernel("HugeTSB in atomic", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) tp = &mm->context.tsb_block[MM_TSB_HUGE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) if (likely(tp->tsb == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) tsb_grow(mm, MM_TSB_HUGE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) tsb_context_switch(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) smp_tsb_sync(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) /* On UltraSPARC-III+ and later, configure the second half of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) * the Data-TLB for huge pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) if (tlb_type == cheetah_plus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) bool need_context_reload = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) unsigned long ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) spin_lock_irq(&ctx_alloc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) ctx = mm->context.sparc64_ctx_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) ctx &= ~CTX_PGSZ_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) if (ctx != mm->context.sparc64_ctx_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) /* When changing the page size fields, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) * must perform a context flush so that no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) * stale entries match. This flush must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) * occur with the original context register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) * settings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) do_flush_tlb_mm(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) /* Reload the context register of all processors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) * also executing in this address space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) mm->context.sparc64_ctx_val = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) need_context_reload = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) spin_unlock_irq(&ctx_alloc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) if (need_context_reload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) on_each_cpu(context_reload, mm, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) static struct resource code_resource = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) .name = "Kernel code",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) static struct resource data_resource = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) .name = "Kernel data",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) static struct resource bss_resource = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) .name = "Kernel bss",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045)
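/* Convert a KERNBASE-relative kernel image address to the physical
 * address the kernel was actually loaded at.
 */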
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) static inline resource_size_t compute_kern_paddr(void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) return (resource_size_t) (addr - KERNBASE + kern_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) static void __init kernel_lds_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) code_resource.start = compute_kern_paddr(_text);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) code_resource.end = compute_kern_paddr(_etext - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) data_resource.start = compute_kern_paddr(_etext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) data_resource.end = compute_kern_paddr(_edata - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) bss_resource.start = compute_kern_paddr(__bss_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) bss_resource.end = compute_kern_paddr(_end - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060)
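/* Publish each firmware-available memory region as a "System RAM"
 * resource in the iomem tree, and nest the kernel code, data and bss
 * resources inside it.
 */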
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) static int __init report_memory(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) kernel_lds_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) for (i = 0; i < pavail_ents; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) res = kzalloc(sizeof(struct resource), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) pr_warn("Failed to allocate resource.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) res->name = "System RAM";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) res->start = pavail[i].phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) if (insert_resource(&iomem_resource, res) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) pr_warn("Resource insertion failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) insert_resource(res, &code_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) insert_resource(res, &data_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) insert_resource(res, &bss_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) arch_initcall(report_memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) #define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) #define do_flush_tlb_kernel_range __flush_tlb_kernel_range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100)
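/* Flush kernel TSB entries and TLB mappings for [start, end), leaving
 * the OpenBoot PROM's address window untouched: a range straddling
 * the OBP hole is split and only the pieces on either side are flushed.
 */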
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) void flush_tlb_kernel_range(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) if (start < LOW_OBP_ADDRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) if (end > HI_OBP_ADDRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) flush_tsb_kernel_range(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) do_flush_tlb_kernel_range(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) void copy_user_highpage(struct page *to, struct page *from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) unsigned long vaddr, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) char *vfrom, *vto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) vfrom = kmap_atomic(from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) vto = kmap_atomic(to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) copy_user_page(vto, vfrom, vaddr, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) kunmap_atomic(vto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) kunmap_atomic(vfrom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) /* If this page has ADI enabled, copy over any ADI tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) * as well
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) if (vma->vm_flags & VM_SPARC_ADI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) unsigned long pfrom, pto, i, adi_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) pfrom = page_to_phys(from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) pto = page_to_phys(to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) asm volatile("ldxa [%1] %2, %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) : "=r" (adi_tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) : "r" (i), "i" (ASI_MCD_REAL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) asm volatile("stxa %0, [%1] %2\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) : "r" (adi_tag), "r" (pto),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) "i" (ASI_MCD_REAL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) pto += adi_blksize();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) asm volatile("membar #Sync\n\t");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) EXPORT_SYMBOL(copy_user_highpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) void copy_highpage(struct page *to, struct page *from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) char *vfrom, *vto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) vfrom = kmap_atomic(from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) vto = kmap_atomic(to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) copy_page(vto, vfrom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) kunmap_atomic(vto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) kunmap_atomic(vfrom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) /* If this platform is ADI enabled, copy any ADI tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) * as well
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) if (adi_capable()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) unsigned long pfrom, pto, i, adi_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) pfrom = page_to_phys(from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) pto = page_to_phys(to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) asm volatile("ldxa [%1] %2, %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) : "=r" (adi_tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) : "r" (i), "i" (ASI_MCD_REAL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) asm volatile("stxa %0, [%1] %2\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) : "r" (adi_tag), "r" (pto),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) "i" (ASI_MCD_REAL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) pto += adi_blksize();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) asm volatile("membar #Sync\n\t");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) EXPORT_SYMBOL(copy_highpage);