^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Architecture-specific setup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * David Mosberger-Tang <davidm@hpl.hp.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Stephane Eranian <eranian@hpl.hp.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Copyright (C) 2000, 2004 Intel Corp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Rohit Seth <rohit.seth@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Suresh Siddha <suresh.b.siddha@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * Gordon Jin <gordon.jin@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * Copyright (C) 1999 VA Linux Systems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * 12/26/04 S.Siddha, G.Jin, R.Seth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * Add multi-threading and multi-core detection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * 03/31/00 R.Seth cpu_initialized and current->processor fixes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * 02/04/00 D.Mosberger some more get_cpuinfo fixes...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * 02/01/00 R.Seth fixed get_cpuinfo for SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * 01/07/99 S.Eranian added the support for command line argument
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * 06/24/99 W.Drummond added boot_cpu_data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * 05/28/05 Z. Menyhart Dynamic stride size for "flush_icache_range()"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/pgtable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/acpi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/console.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/kdev_t.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <linux/memblock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <linux/reboot.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <linux/sched/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <linux/sched/clock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <linux/sched/task_stack.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <linux/seq_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include <linux/threads.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include <linux/screen_info.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include <linux/dmi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include <linux/root_dev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #include <linux/serial.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include <linux/serial_core.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include <linux/efi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #include <linux/initrd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #include <linux/pm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #include <linux/cpufreq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #include <linux/kexec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #include <linux/crash_dump.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #include <asm/mca.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #include <asm/meminit.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #include <asm/patch.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #include <asm/sal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #include <asm/sections.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #include <asm/setup.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #include <asm/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #include <asm/tlbflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #include <asm/unistd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #include <asm/uv/uv.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #include <asm/xtp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
/*
 * With CONFIG_SMP each CPU's cpuinfo lives in a dedicated per-CPU area;
 * fail the build if struct cpuinfo_ia64 no longer fits in one page.
 */
#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

/* Human-readable platform name (NOTE(review): filled in elsewhere, not in this file). */
char ia64_platform_name[64];

#ifdef CONFIG_SMP
/* Offset of each CPU's per-CPU data area, indexed by CPU number. */
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

/* Per-CPU processor description and this CPU's own copy of its per-cpu offset. */
DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
EXPORT_SYMBOL(ia64_cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(local_per_cpu_offset);
#endif
unsigned long ia64_cycles_per_usec;	/* ITC cycles per usec — presumably set during time init; confirm */
struct ia64_boot_param *ia64_boot_param;	/* boot parameter block handed over by the boot loader */
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

/* /proc/iomem entries for the kernel image; ranges are filled in by register_memory(). */
static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

unsigned long ia64_max_cacheline_size;

unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * "flush_icache_range()" needs to know what processor dependent stride size to use
 * when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;	/* ~0: not yet determined — presumably set during cache setup */
/*
 * "clflush_cache_range()" needs to know what processor dependent stride size to
 * use when it flushes cache lines including both d-cache and i-cache.
 */
/* Safest way to go: 32 bytes by 32 bytes */
#define	CACHE_STRIDE_SHIFT	5
unsigned long ia64_cache_stride_shift = ~0;	/* ~0: not yet determined — presumably set during cache setup */

/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;	/* discarded after boot (__initdata) */
int num_rsvd_regions __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) * Filter incoming memory segments based on the primitive map created from the boot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) * parameters. Segments contained in the map are removed from the memory ranges. A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) * caller-specified function is called with the memory ranges that remain after filtering.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) * This routine does not assume the incoming segments are sorted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) */
int __init
filter_rsvd_memory (u64 start, u64 end, void *arg)
{
	u64 range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	/* Physical page 0 must not be used on this platform; drop it. */
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end) return 0;	/* the segment was only page 0 */
	}
#endif
	/*
	 * lowest possible address(walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;	/* caller-supplied callback, passed through 'arg' */

	for (i = 0; i < num_rsvd_regions; ++i) {
		/* part of [start, end) between the previous reserved region and this one */
		range_start = max(start, prev_start);
		range_end = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end) return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) * Similar to "filter_rsvd_memory()", but the reserved memory ranges
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) * are not filtered out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) filter_memory(u64 start, u64 end, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) void (*func)(unsigned long, unsigned long, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) #if IGNORE_PFN0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) if (start == PAGE_OFFSET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) printk(KERN_WARNING "warning: skipping physical page 0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) start += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) if (start >= end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) func = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) if (start < end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) call_pernode_memory(__pa(start), end - start, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) static void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) sort_regions (struct rsvd_region *rsvd_region, int max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) /* simple bubble sorting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) while (max--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) for (j = 0; j < max; ++j) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) if (rsvd_region[j].start > rsvd_region[j+1].start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) struct rsvd_region tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) tmp = rsvd_region[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) rsvd_region[j] = rsvd_region[j + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) rsvd_region[j + 1] = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) /* merge overlaps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) merge_regions (struct rsvd_region *rsvd_region, int max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) for (i = 1; i < max; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) if (rsvd_region[i].start >= rsvd_region[i-1].end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) if (rsvd_region[i].end > rsvd_region[i-1].end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) rsvd_region[i-1].end = rsvd_region[i].end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) --max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) memmove(&rsvd_region[i], &rsvd_region[i+1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) (max - i) * sizeof(struct rsvd_region));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) return max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) * Request address space for all standard resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) static int __init register_memory(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) code_resource.start = ia64_tpa(_text);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) code_resource.end = ia64_tpa(_etext) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) data_resource.start = ia64_tpa(_etext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) data_resource.end = ia64_tpa(_edata) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) bss_resource.start = ia64_tpa(__bss_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) bss_resource.end = ia64_tpa(_end) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) efi_initialize_iomem_resources(&code_resource, &data_resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) &bss_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) __initcall(register_memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) #ifdef CONFIG_KEXEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) * This function checks if the reserved crashkernel is allowed on the specific
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) * IA64 machine flavour. Machines without an IO TLB use swiotlb and require
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) * some memory below 4 GB (i.e. in 32 bit area), see the implementation of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) * kernel/dma/swiotlb.c. The hpzx1 architecture has an IO TLB but cannot use that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) * in kdump case. See the comment in sba_init() in sba_iommu.c.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) * So, the only machvec that really supports loading the kdump kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) * over 4 GB is "uv".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) if (is_uv_system())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) return pbase < (1UL << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
/*
 * Parse "crashkernel=" from the command line and, if present, record the
 * reservation in rsvd_region[] (bumping *n) and in crashk_res.  Also
 * records the EFI memmap and boot-param ranges in their resources
 * (presumably for kexec/kdump tooling — confirm against their consumers).
 */
static void __init setup_crashkernel(unsigned long total, int *n)
{
	unsigned long long base = 0, size = 0;
	int ret;

	/* size/base come from "crashkernel=", interpreted against total RAM */
	ret = parse_crashkernel(boot_command_line, total,
			&size, &base);
	if (ret == 0 && size > 0) {
		if (!base) {
			/*
			 * No base requested: tidy up the reserved-region
			 * table, then search it for a free spot of the
			 * requested size.
			 */
			sort_regions(rsvd_region, *n);
			*n = merge_regions(rsvd_region, *n);
			base = kdump_find_rsvd_region(size,
					rsvd_region, *n);
		}

		/* see the comment above check_crashkernel_memory() */
		if (!check_crashkernel_memory(base, size)) {
			pr_warn("crashkernel: There would be kdump memory "
				"at %ld GB but this is unusable because it "
				"must\nbe below 4 GB. Change the memory "
				"configuration of the machine.\n",
				(unsigned long)(base >> 30));
			return;
		}

		/* NOTE(review): base == ~0UL presumably means no suitable
		 * region was found — confirm against kdump_find_rsvd_region(). */
		if (base != ~0UL) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
					"for crashkernel (System RAM: %ldMB)\n",
					(unsigned long)(size >> 20),
					(unsigned long)(base >> 20),
					(unsigned long)(total >> 20));
			/* rsvd_region[] holds virtual addresses, crashk_res physical */
			rsvd_region[*n].start =
				(unsigned long)__va(base);
			rsvd_region[*n].end =
				(unsigned long)__va(base + size);
			(*n)++;
			crashk_res.start = base;
			crashk_res.end = base + size - 1;
		}
	}
	/* always publish where the EFI memmap and boot params live */
	efi_memmap_res.start = ia64_boot_param->efi_memmap;
	efi_memmap_res.end = efi_memmap_res.start +
		ia64_boot_param->efi_memmap_size;
	boot_param_res.start = __pa(ia64_boot_param);
	boot_param_res.end = boot_param_res.start +
		sizeof(*ia64_boot_param);
}
#else
/* !CONFIG_KEXEC: nothing to reserve */
static inline void __init setup_crashkernel(unsigned long total, int *n)
{}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) * reserve_memory - setup reserved memory areas
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) * Setup the reserved memory areas set aside for the boot parameters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) * initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) * see arch/ia64/include/asm/meminit.h if you need to define more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) */
void __init
reserve_memory (void)
{
	int n = 0;
	unsigned long total_memory;

	/*
	 * none of the entries in this table overlap
	 */
	/* the boot parameter block itself */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	/* the EFI memory map passed in by the boot loader */
	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	/* the kernel command line (NUL-terminated string) */
	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end = (rsvd_region[n].start
			+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	/* the kernel image itself */
	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end = (unsigned long) ia64_imva(_end);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	/* the initrd, if the boot loader provided one */
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

#ifdef CONFIG_CRASH_DUMP
	/* the crashed kernel's ELF core header, when running as the kdump kernel */
	if (reserve_elfcorehdr(&rsvd_region[n].start,
			&rsvd_region[n].end) == 0)
		n++;
#endif

	/* one more region is claimed by EFI; also yields the total memory size */
	total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;

	setup_crashkernel(total_memory, &n);

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end = ~0UL;
	n++;

	num_rsvd_regions = n;
	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);	/* would mean we overflowed the table above */

	sort_regions(rsvd_region, num_rsvd_regions);
	num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions);

	/* reserve all regions except the end of memory marker with memblock */
	for (n = 0; n < num_rsvd_regions - 1; n++) {
		struct rsvd_region *region = &rsvd_region[n];
		phys_addr_t addr = __pa(region->start);
		phys_addr_t size = region->end - region->start;

		memblock_reserve(addr, size);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) * find_initrd - get initrd parameters from the boot parameter structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) * Grab the initrd start and end from the boot parameter struct given us by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) * the boot loader.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) find_initrd (void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) #ifdef CONFIG_BLK_DEV_INITRD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) if (ia64_boot_param->initrd_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) initrd_end = initrd_start+ia64_boot_param->initrd_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) initrd_start, ia64_boot_param->initrd_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)
/*
 * Set up ia64_iobase (the kernel's MMIO window onto the I/O port space)
 * and the legacy I/O port space descriptor io_space[0].
 */
static void __init
io_port_init (void)
{
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.
	 *
	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
	 * the port's virtual address, so ia32_load_state() loads it with a
	 * user virtual address.  But in ia64 mode, glibc uses the
	 * *physical* address in ar.k0 to mmap the appropriate area from
	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
	 * cases, user-mode can only use the legacy 0-64K I/O port space.
	 *
	 * ar.k0 is not involved in kernel I/O port accesses, which can use
	 * any of the I/O port spaces and are done via MMIO using the
	 * virtual mmio_base from the appropriate io_space[].
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		/* EFI didn't describe an I/O port range; trust firmware's ar.k0 */
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	/* NOTE(review): size 0 here relies on ioremap()'s handling of a
	 * zero-length mapping on ia64 — confirm before changing. */
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) * early_console_setup - setup debugging console
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) * Consoles started here require little enough setup that we can start using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) * them very early in the boot process, either right after the machine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) * vector initialization, or even before if the drivers can detect their hw.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) * Returns non-zero if a console couldn't be setup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) static inline int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) early_console_setup (char *cmdline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) #ifdef CONFIG_EFI_PCDP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) if (!efi_setup_pcdp_console(cmdline))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) static void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) screen_info_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) unsigned int orig_x, orig_y, num_cols, num_rows, font_height;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) memset(&screen_info, 0, sizeof(screen_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) if (!ia64_boot_param->console_info.num_rows ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) !ia64_boot_param->console_info.num_cols) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) printk(KERN_WARNING "invalid screen-info, guessing 80x25\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) orig_x = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) orig_y = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) num_cols = 80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) num_rows = 25;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) font_height = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) orig_x = ia64_boot_param->console_info.orig_x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) orig_y = ia64_boot_param->console_info.orig_y;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) num_cols = ia64_boot_param->console_info.num_cols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) num_rows = ia64_boot_param->console_info.num_rows;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) font_height = 400 / num_rows;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) screen_info.orig_x = orig_x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) screen_info.orig_y = orig_y;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) screen_info.orig_video_cols = num_cols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) screen_info.orig_video_lines = num_rows;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) screen_info.orig_video_points = font_height;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) screen_info.orig_video_mode = 3; /* XXX fake */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) screen_info.orig_video_isVGA = 1; /* XXX fake */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) screen_info.orig_video_ega_bx = 3; /* XXX fake */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) mark_bsp_online (void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) /* If we register an early console, allow CPU 0 to printk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) set_cpu_online(smp_processor_id(), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) static __initdata int nomca;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) static __init int setup_nomca(char *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) nomca = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) early_param("nomca", setup_nomca);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) #ifdef CONFIG_CRASH_DUMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) int __init reserve_elfcorehdr(u64 *start, u64 *end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) u64 length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) /* We get the address using the kernel command line,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) * but the size is extracted from the EFI tables.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) * Both address and size are required for reservation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) * to work properly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) if (!is_vmcore_usable())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) vmcore_unusable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) *start = (unsigned long)__va(elfcorehdr_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) *end = *start + length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) #endif /* CONFIG_PROC_VMCORE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) setup_arch (char **cmdline_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) unw_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) *cmdline_p = __va(ia64_boot_param->command_line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) efi_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) io_port_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) uv_probe_system_type();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) parse_early_param();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) if (early_console_setup(*cmdline_p) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) mark_bsp_online();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) /* Initialize the ACPI boot-time table parser */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) acpi_table_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) early_acpi_boot_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) #ifdef CONFIG_ACPI_NUMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) acpi_numa_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) acpi_numa_fixup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) #ifdef CONFIG_ACPI_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) prefill_possible_map();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) per_cpu_scan_finalize((cpumask_weight(&early_cpu_possible_map) == 0 ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 32 : cpumask_weight(&early_cpu_possible_map)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) additional_cpus > 0 ? additional_cpus : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) #endif /* CONFIG_ACPI_NUMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) smp_build_cpu_map();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) find_memory();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) /* process SAL system table: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) ia64_sal_init(__va(sal_systab_phys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) #ifdef CONFIG_ITANIUM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) unsigned long num_phys_stacked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) cpu_physical_id(0) = hard_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) cpu_init(); /* initialize the bootstrap CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) mmu_context_init(); /* initialize context_id bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) #ifdef CONFIG_VT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) if (!conswitchp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) # if defined(CONFIG_VGA_CONSOLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) * Non-legacy systems may route legacy VGA MMIO range to system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) * memory. vga_con probes the MMIO hole, so memory looks like
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) * a VGA device to it. The EFI memory map can tell us if it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) * memory so we can avoid this problem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) conswitchp = &vga_con;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) # endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) /* enable IA-64 Machine Check Abort Handling unless disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) if (!nomca)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) ia64_mca_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) * Default to /dev/sda2. This assumes that the EFI partition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) * is physical disk 1 partition 1 and the Linux root disk is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) * physical disk 1 partition 2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) ROOT_DEV = Root_SDA2; /* default to second partition on first drive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) if (is_uv_system())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) uv_setup(cmdline_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) init_smp_config();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) screen_info_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) paging_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) clear_sched_clock_stable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) * Display cpu info for all CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) show_cpuinfo (struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) # define lpj c->loops_per_jiffy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) # define cpunum c->cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) # define lpj loops_per_jiffy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) # define cpunum 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) static struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) unsigned long mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) const char *feature_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) } feature_bits[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) { 1UL << 0, "branchlong" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) { 1UL << 1, "spontaneous deferral"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) { 1UL << 2, "16-byte atomic ops" }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) char features[128], *cp, *sep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) struct cpuinfo_ia64 *c = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) unsigned long mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) unsigned long proc_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) int i, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) mask = c->features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) /* build the feature string: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) memcpy(features, "standard", 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) cp = features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) size = sizeof(features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) sep = "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) if (mask & feature_bits[i].mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) cp += snprintf(cp, size, "%s%s", sep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) feature_bits[i].feature_name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) sep = ", ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) mask &= ~feature_bits[i].mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) size = sizeof(features) - (cp - features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) if (mask && size > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) /* print unknown features as a hex value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) snprintf(cp, size, "%s0x%lx", sep, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) proc_freq = cpufreq_quick_get(cpunum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) if (!proc_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) proc_freq = c->proc_freq / 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) seq_printf(m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) "processor : %d\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) "vendor : %s\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) "arch : IA-64\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) "family : %u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) "model : %u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) "model name : %s\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) "revision : %u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) "archrev : %u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) "features : %s\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) "cpu number : %lu\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) "cpu regs : %u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) "cpu MHz : %lu.%03lu\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) "itc MHz : %lu.%06lu\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) "BogoMIPS : %lu.%02lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) cpunum, c->vendor, c->family, c->model,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) c->model_name, c->revision, c->archrev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) features, c->ppn, c->number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) proc_freq / 1000, proc_freq % 1000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) c->itc_freq / 1000000, c->itc_freq % 1000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) lpj*HZ/500000, (lpj*HZ/5000) % 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) seq_printf(m, "siblings : %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) cpumask_weight(&cpu_core_map[cpunum]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) if (c->socket_id != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) seq_printf(m, "physical id: %u\n", c->socket_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) if (c->threads_per_core > 1 || c->cores_per_socket > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) seq_printf(m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) "core id : %u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) "thread id : %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) c->core_id, c->thread_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) seq_printf(m,"\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) static void *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) c_start (struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) while (*pos < nr_cpu_ids && !cpu_online(*pos))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) ++*pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) static void *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) c_next (struct seq_file *m, void *v, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) ++*pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) return c_start(m, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) c_stop (struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) const struct seq_operations cpuinfo_op = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) .start = c_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) .next = c_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) .stop = c_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) .show = show_cpuinfo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) #define MAX_BRANDS 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) static char brandname[MAX_BRANDS][128];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) static char *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) get_model_name(__u8 family, __u8 model)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) static int overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) char brand[128];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) memcpy(brand, "Unknown", 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (ia64_pal_get_brand_info(brand)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (family == 0x7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) memcpy(brand, "Merced", 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) else if (family == 0x1f) switch (model) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) case 0: memcpy(brand, "McKinley", 9); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) case 1: memcpy(brand, "Madison", 8); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) for (i = 0; i < MAX_BRANDS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (strcmp(brandname[i], brand) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) return brandname[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) for (i = 0; i < MAX_BRANDS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (brandname[i][0] == '\0')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) return strcpy(brandname[i], brand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (overflow++ == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) printk(KERN_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) "%s: Table overflow. Some processor model information will be missing\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) return "Unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) identify_cpu (struct cpuinfo_ia64 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) unsigned long bits[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) /* id 0 & 1: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) char vendor[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) /* id 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) u64 ppn; /* processor serial number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) /* id 3: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) unsigned number : 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) unsigned revision : 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) unsigned model : 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) unsigned family : 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) unsigned archrev : 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) unsigned reserved : 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /* id 4: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) u64 features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) } field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) } cpuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) pal_vm_info_1_u_t vm1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) pal_vm_info_2_u_t vm2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) pal_status_t status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) for (i = 0; i < 5; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) cpuid.bits[i] = ia64_get_cpuid(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) memcpy(c->vendor, cpuid.field.vendor, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) c->cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /* below default values will be overwritten by identify_siblings()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * for Multi-Threading/Multi-Core capable CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) c->threads_per_core = c->cores_per_socket = c->num_log = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) c->socket_id = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) identify_siblings(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (c->threads_per_core > smp_num_siblings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) smp_num_siblings = c->threads_per_core;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) c->ppn = cpuid.field.ppn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) c->number = cpuid.field.number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) c->revision = cpuid.field.revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) c->model = cpuid.field.model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) c->family = cpuid.field.family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) c->archrev = cpuid.field.archrev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) c->features = cpuid.field.features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) c->model_name = get_model_name(c->family, c->model);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) status = ia64_pal_vm_summary(&vm1, &vm2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (status == PAL_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * Do the following calculations:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * 1. the max. cache line size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) get_cache_info(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) unsigned long line_size, max = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) unsigned long l, levels, unique_caches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) pal_cache_config_info_t cci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) long status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) status = ia64_pal_cache_summary(&levels, &unique_caches);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (status != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) __func__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) max = SMP_CACHE_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /* Safest setup for "flush_icache_range()" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) /* Safest setup for "clflush_cache_range()" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) for (l = 0; l < levels; ++l) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) /* cache_type (data_or_unified)=2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) status = ia64_pal_cache_config_info(l, 2, &cci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (status != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) printk(KERN_ERR "%s: ia64_pal_cache_config_info"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) "(l=%lu, 2) failed (status=%ld)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) __func__, l, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) max = SMP_CACHE_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) /* The safest setup for "flush_icache_range()" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) /* The safest setup for "clflush_cache_range()" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) cci.pcci_unified = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (cci.pcci_stride < ia64_cache_stride_shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) ia64_cache_stride_shift = cci.pcci_stride;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) line_size = 1 << cci.pcci_line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (line_size > max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) max = line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (!cci.pcci_unified) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /* cache_type (instruction)=1*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) status = ia64_pal_cache_config_info(l, 1, &cci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (status != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) printk(KERN_ERR "%s: ia64_pal_cache_config_info"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) "(l=%lu, 1) failed (status=%ld)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) __func__, l, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) /* The safest setup for flush_icache_range() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (cci.pcci_stride < ia64_i_cache_stride_shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) ia64_i_cache_stride_shift = cci.pcci_stride;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (max > ia64_max_cacheline_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) ia64_max_cacheline_size = max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * cpu_init() initializes state that is per-CPU. This function acts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * as a 'CPU state barrier', nothing should get across.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) cpu_init (void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) extern void ia64_mmu_init(void *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) unsigned long num_phys_stacked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) pal_vm_info_2_u_t vmi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) unsigned int max_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct cpuinfo_ia64 *cpu_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) void *cpu_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) cpu_data = per_cpu_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * insert boot cpu into sibling and core mapes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * (must be done after per_cpu area is setup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (smp_processor_id() == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) cpumask_set_cpu(0, &cpu_core_map[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * Set ar.k3 so that assembly code in MCA handler can compute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * physical addresses of per cpu variables with a simple:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * phys = ar.k3 + &per_cpu_var
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * and the alt-dtlb-miss handler can set per-cpu mapping into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * the TLB when needed. head.S already did this for cpu0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) ia64_set_kr(IA64_KR_PER_CPU_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) ia64_tpa(cpu_data) - (long) __per_cpu_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) get_cache_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * ia64_mmu_init() yet. And we can't call ia64_mmu_init() first because it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * depends on the data returned by identify_cpu(). We break the dependency by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * accessing cpu_data() through the canonical per-CPU address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) identify_cpu(cpu_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) #ifdef CONFIG_MCKINLEY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) # define FEATURE_SET 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) struct ia64_pal_retval iprv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (cpu_info->family == 0x1f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) (iprv.v1 | 0x80), FEATURE_SET, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) /* Clear the stack memory reserved for pt_regs: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) memset(task_pt_regs(current), 0, sizeof(struct pt_regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) ia64_set_kr(IA64_KR_FPU_OWNER, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * Initialize the page-table base register to a global
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * directory with all zeroes. This ensure that we can handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * TLB-misses to user address-space even before we created the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * first user address-space. This may happen, e.g., due to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * aggressive use of lfetch.fault.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * Initialize default control register to defer speculative faults except
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * for those arising from TLB misses, which are not deferred. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * kernel MUST NOT depend on a particular setting of these bits (in other words,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * the kernel must have recovery code for all speculative accesses). Turn on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * dcr.lc as per recommendation by the architecture team. Most IA-32 apps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * be fine).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) mmgrab(&init_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) current->active_mm = &init_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) BUG_ON(current->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) ia64_mmu_init(ia64_imva(cpu_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) ia64_mca_cpu_init(ia64_imva(cpu_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) /* Clear ITC to eliminate sched_clock() overflows in human time. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) ia64_set_itc(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /* disable all local interrupt sources: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) ia64_set_itv(1 << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) ia64_set_lrr0(1 << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) ia64_set_lrr1(1 << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /* clear TPR & XTP to enable all interrupt classes: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) ia64_setreg(_IA64_REG_CR_TPR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) /* Clear any pending interrupts left by SAL/EFI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) ia64_eoi();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) normal_xtp();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (ia64_pal_vm_summary(NULL, &vmi) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) max_ctx = (1U << 15) - 1; /* use architected minimum */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) while (max_ctx < ia64_ctx.max_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) unsigned int old = ia64_ctx.max_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) "stacked regs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) num_phys_stacked = 96;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /* size of physical stacked register partition plus 8 bytes: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (num_phys_stacked > max_num_phys_stacked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) max_num_phys_stacked = num_phys_stacked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) check_bugs (void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) (unsigned long) __end___mckinley_e9_bundles);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) static int __init run_dmi_scan(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) dmi_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) core_initcall(run_dmi_scan);