^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Extensible Firmware Interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Based on Extensible Firmware Interface Specification version 0.9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * April 30, 1999
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Copyright (C) 1999 VA Linux Systems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Copyright (C) 1999-2003 Hewlett-Packard Co.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * David Mosberger-Tang <davidm@hpl.hp.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * Stephane Eranian <eranian@hpl.hp.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * Bjorn Helgaas <bjorn.helgaas@hp.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * All EFI Runtime Services are not implemented yet as EFI only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * supports physical mode addressing on SoftSDV. This is to be fixed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * in a future version. --drummond 1999-07-20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * Implemented EFI runtime services and virtual mode calls. --davidm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * Goutham Rao: <goutham.rao@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * Skip non-WB memory and ignore empty memory ranges.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) */
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/crash_dump.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/efi.h>
#include <linux/kexec.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/kregs.h>
#include <asm/meminit.h>
#include <asm/processor.h>
#include <asm/mca.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#define EFI_DEBUG	0

#define ESI_TABLE_GUID					\
	EFI_GUID(0x43EA58DC, 0xCF28, 0x4b06, 0xB3,	\
		 0x91, 0xB7, 0x50, 0x59, 0x34, 0x2B, 0xD4)

static unsigned long mps_phys = EFI_INVALID_TABLE_ADDR;
static __initdata unsigned long palo_phys;

unsigned long __initdata esi_phys = EFI_INVALID_TABLE_ADDR;
unsigned long hcdp_phys = EFI_INVALID_TABLE_ADDR;
unsigned long sal_systab_phys = EFI_INVALID_TABLE_ADDR;

static const efi_config_table_type_t arch_tables[] __initconst = {
	{ESI_TABLE_GUID,				&esi_phys,	  "ESI"       },
	{HCDP_TABLE_GUID,				&hcdp_phys,	  "HCDP"      },
	{MPS_TABLE_GUID,				&mps_phys,	  "MPS"       },
	{PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID,	&palo_phys,	  "PALO"      },
	{SAL_SYSTEM_TABLE_GUID,				&sal_systab_phys, "SALsystab" },
	{},
};

extern efi_status_t efi_call_phys (void *, ...);

static efi_runtime_services_t *runtime;
static u64 mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;

#define efi_call_virt(f, args...)	(*(f))(args)

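/*
 * The STUB_* macros below generate thin wrappers around the EFI runtime
 * services.  Each wrapper saves and restores the scratch floating-point
 * registers around the firmware call and runs every pointer argument
 * through 'adjust_arg', so the same code can be instantiated for both
 * physical-mode calls (arguments converted to physical addresses) and
 * virtual-mode calls (arguments passed through unchanged).
 */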
#define STUB_GET_TIME(prefix, adjust_arg)				       \
static efi_status_t							       \
prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc)			       \
{									       \
	struct ia64_fpreg fr[6];					       \
	efi_time_cap_t *atc = NULL;					       \
	efi_status_t ret;						       \
									       \
	if (tc)								       \
		atc = adjust_arg(tc);					       \
	ia64_save_scratch_fpregs(fr);					       \
	ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time),   \
				adjust_arg(tm), atc);			       \
	ia64_load_scratch_fpregs(fr);					       \
	return ret;							       \
}

#define STUB_SET_TIME(prefix, adjust_arg)				       \
static efi_status_t							       \
prefix##_set_time (efi_time_t *tm)					       \
{									       \
	struct ia64_fpreg fr[6];					       \
	efi_status_t ret;						       \
									       \
	ia64_save_scratch_fpregs(fr);					       \
	ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time),   \
				adjust_arg(tm));			       \
	ia64_load_scratch_fpregs(fr);					       \
	return ret;							       \
}

#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg)			       \
static efi_status_t							       \
prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending,	       \
			  efi_time_t *tm)				       \
{									       \
	struct ia64_fpreg fr[6];					       \
	efi_status_t ret;						       \
									       \
	ia64_save_scratch_fpregs(fr);					       \
	ret = efi_call_##prefix(					       \
		(efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time),     \
		adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm));    \
	ia64_load_scratch_fpregs(fr);					       \
	return ret;							       \
}

#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg)			       \
static efi_status_t							       \
prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm)		       \
{									       \
	struct ia64_fpreg fr[6];					       \
	efi_time_t *atm = NULL;						       \
	efi_status_t ret;						       \
									       \
	if (tm)								       \
		atm = adjust_arg(tm);					       \
	ia64_save_scratch_fpregs(fr);					       \
	ret = efi_call_##prefix(					       \
		(efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time),     \
		enabled, atm);						       \
	ia64_load_scratch_fpregs(fr);					       \
	return ret;							       \
}

#define STUB_GET_VARIABLE(prefix, adjust_arg)				       \
static efi_status_t							       \
prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr,     \
		       unsigned long *data_size, void *data)		       \
{									       \
	struct ia64_fpreg fr[6];					       \
	u32 *aattr = NULL;						       \
	efi_status_t ret;						       \
									       \
	if (attr)							       \
		aattr = adjust_arg(attr);				       \
	ia64_save_scratch_fpregs(fr);					       \
	ret = efi_call_##prefix(					       \
		(efi_get_variable_t *) __va(runtime->get_variable),	       \
		adjust_arg(name), adjust_arg(vendor), aattr,		       \
		adjust_arg(data_size), adjust_arg(data));		       \
	ia64_load_scratch_fpregs(fr);					       \
	return ret;							       \
}

#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg)			       \
static efi_status_t							       \
prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name,     \
			    efi_guid_t *vendor)				       \
{									       \
	struct ia64_fpreg fr[6];					       \
	efi_status_t ret;						       \
									       \
	ia64_save_scratch_fpregs(fr);					       \
	ret = efi_call_##prefix(					       \
		(efi_get_next_variable_t *) __va(runtime->get_next_variable), \
		adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
	ia64_load_scratch_fpregs(fr);					       \
	return ret;							       \
}

#define STUB_SET_VARIABLE(prefix, adjust_arg)				       \
static efi_status_t							       \
prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor,		       \
		       u32 attr, unsigned long data_size,		       \
		       void *data)					       \
{									       \
	struct ia64_fpreg fr[6];					       \
	efi_status_t ret;						       \
									       \
	ia64_save_scratch_fpregs(fr);					       \
	ret = efi_call_##prefix(					       \
		(efi_set_variable_t *) __va(runtime->set_variable),	       \
		adjust_arg(name), adjust_arg(vendor), attr, data_size,	       \
		adjust_arg(data));					       \
	ia64_load_scratch_fpregs(fr);					       \
	return ret;							       \
}

#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg)		       \
static efi_status_t							       \
prefix##_get_next_high_mono_count (u32 *count)				       \
{									       \
	struct ia64_fpreg fr[6];					       \
	efi_status_t ret;						       \
									       \
	ia64_save_scratch_fpregs(fr);					       \
	ret = efi_call_##prefix((efi_get_next_high_mono_count_t *)	       \
				__va(runtime->get_next_high_mono_count),       \
				adjust_arg(count));			       \
	ia64_load_scratch_fpregs(fr);					       \
	return ret;							       \
}

#define STUB_RESET_SYSTEM(prefix, adjust_arg)				       \
static void								       \
prefix##_reset_system (int reset_type, efi_status_t status,		       \
		       unsigned long data_size, efi_char16_t *data)	       \
{									       \
	struct ia64_fpreg fr[6];					       \
	efi_char16_t *adata = NULL;					       \
									       \
	if (data)							       \
		adata = adjust_arg(data);				       \
									       \
	ia64_save_scratch_fpregs(fr);					       \
	efi_call_##prefix(						       \
		(efi_reset_system_t *) __va(runtime->reset_system),	       \
		reset_type, status, data_size, adata);			       \
	/* should not return, but just in case... */			       \
	ia64_load_scratch_fpregs(fr);					       \
}

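/*
 * Physical-mode instantiation of the stubs: phys_ptr() translates a
 * kernel pointer to its physical address (ia64_tpa) so that the firmware
 * can be called before SetVirtualAddressMap() has been done.
 */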
#define phys_ptr(arg)	((__typeof__(arg)) ia64_tpa(arg))

STUB_GET_TIME(phys, phys_ptr)
STUB_SET_TIME(phys, phys_ptr)
STUB_GET_WAKEUP_TIME(phys, phys_ptr)
STUB_SET_WAKEUP_TIME(phys, phys_ptr)
STUB_GET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
STUB_SET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
STUB_RESET_SYSTEM(phys, phys_ptr)

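/*
 * Virtual-mode instantiation of the stubs: once SetVirtualAddressMap()
 * has been called, arguments are passed to the firmware unchanged, so
 * the adjust_arg hook is simply the identity.
 */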
#define id(arg)	arg

STUB_GET_TIME(virt, id)
STUB_SET_TIME(virt, id)
STUB_GET_WAKEUP_TIME(virt, id)
STUB_SET_WAKEUP_TIME(virt, id)
STUB_GET_VARIABLE(virt, id)
STUB_GET_NEXT_VARIABLE(virt, id)
STUB_SET_VARIABLE(virt, id)
STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
STUB_RESET_SYSTEM(virt, id)

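/*
 * Read the current time from the EFI RTC and convert it to a
 * struct timespec64.  On failure the result is zeroed.
 */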
void
efi_gettimeofday (struct timespec64 *ts)
{
	efi_time_t tm;

	if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS) {
		memset(ts, 0, sizeof(*ts));
		return;
	}

	ts->tv_sec = mktime64(tm.year, tm.month, tm.day,
			      tm.hour, tm.minute, tm.second);
	ts->tv_nsec = tm.nanosecond;
}

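/*
 * Memory is usable by the kernel only if it is cacheable (EFI_MEMORY_WB)
 * and of one of the types listed below (loader, boot-services or
 * conventional memory).
 */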
static int
is_memory_available (efi_memory_desc_t *md)
{
	if (!(md->attribute & EFI_MEMORY_WB))
		return 0;

	switch (md->type) {
	case EFI_LOADER_CODE:
	case EFI_LOADER_DATA:
	case EFI_BOOT_SERVICES_CODE:
	case EFI_BOOT_SERVICES_DATA:
	case EFI_CONVENTIONAL_MEMORY:
		return 1;
	}
	return 0;
}

typedef struct kern_memdesc {
	u64 attribute;
	u64 start;
	u64 num_pages;
} kern_memdesc_t;

static kern_memdesc_t *kern_memmap;

#define efi_md_size(md)	(md->num_pages << EFI_PAGE_SHIFT)

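/*
 * Small helpers: end addresses of kernel and EFI memory descriptors,
 * plus cacheability attribute tests.
 */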
static inline u64
kmd_end(kern_memdesc_t *kmd)
{
	return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
}

static inline u64
efi_md_end(efi_memory_desc_t *md)
{
	return (md->phys_addr + efi_md_size(md));
}

static inline int
efi_wb(efi_memory_desc_t *md)
{
	return (md->attribute & EFI_MEMORY_WB);
}

static inline int
efi_uc(efi_memory_desc_t *md)
{
	return (md->attribute & EFI_MEMORY_UC);
}

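/*
 * Apply 'callback' to every kern_memmap range whose attribute matches
 * 'attr', passing page-aligned *virtual* start/end addresses (identity
 * mapped for WB memory, uncached-offset mapped for UC memory).  Stops
 * early if the callback returns a negative value.
 */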
static void
walk (efi_freemem_callback_t callback, void *arg, u64 attr)
{
	kern_memdesc_t *k;
	u64 start, end, voff;

	voff = (attr == EFI_MEMORY_WB) ? PAGE_OFFSET : __IA64_UNCACHED_OFFSET;
	for (k = kern_memmap; k->start != ~0UL; k++) {
		if (k->attribute != attr)
			continue;
		start = PAGE_ALIGN(k->start);
		end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK;
		if (start < end)
			if ((*callback)(start + voff, end + voff, arg) < 0)
				return;
	}
}

/*
 * Walk the EFI memory map and call CALLBACK once for each EFI memory
 * descriptor that has memory available for OS use.
 */
void
efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
{
	walk(callback, arg, EFI_MEMORY_WB);
}

/*
 * Walk the EFI memory map and call CALLBACK once for each EFI memory
 * descriptor that has memory available to the uncached allocator.
 */
void
efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
{
	walk(callback, arg, EFI_MEMORY_UC);
}

/*
 * Look for the PAL_CODE region reported by EFI and map it using an
 * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
 * Abstraction Layer, chapter 11 of the ADAG.
 */
void *
efi_get_pal_addr (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	int pal_code_count = 0;
	u64 vaddr, mask;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->type != EFI_PAL_CODE)
			continue;

		if (++pal_code_count > 1) {
			printk(KERN_ERR "Too many EFI PAL code memory ranges, "
			       "dropped @ %llx\n", md->phys_addr);
			continue;
		}
		/*
		 * The only ITLB entry in region 7 that is used is the one
		 * installed by __start().  That entry covers a 64MB range.
		 */
		mask = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
		vaddr = PAGE_OFFSET + md->phys_addr;

		/*
		 * We must check that the PAL mapping won't overlap with the
		 * kernel mapping.
		 *
		 * PAL code is guaranteed to be aligned on a power of 2 between
		 * 4k and 256KB and that only one ITR is needed to map it. This
		 * implies that the PAL code is always aligned on its size,
		 * i.e., the closest matching page size supported by the TLB.
		 * Therefore PAL code is guaranteed never to cross a 64MB
		 * boundary unless it is bigger than 64MB (very unlikely!).
		 * So for now the following test is enough to determine whether
		 * or not we need a dedicated ITR for the PAL code.
		 */
		if ((vaddr & mask) == (KERNEL_START & mask)) {
			printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
			       __func__);
			continue;
		}

		if (efi_md_size(md) > IA64_GRANULE_SIZE)
			panic("Whoa! PAL code size bigger than a granule!");

#if EFI_DEBUG
		mask = ~((1 << IA64_GRANULE_SHIFT) - 1);

		printk(KERN_INFO "CPU %d: mapping PAL code "
		       "[0x%llx-0x%llx) into [0x%llx-0x%llx)\n",
		       smp_processor_id(), md->phys_addr,
		       md->phys_addr + efi_md_size(md),
		       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
#endif
		return __va(md->phys_addr);
	}
	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
	       __func__);
	return NULL;
}

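/*
 * Byte-wise checksum over the PALO table; a table is valid when its
 * bytes sum to zero (mod 256).
 */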
static u8 __init palo_checksum(u8 *buffer, u32 length)
{
	u8 sum = 0;
	u8 *end = buffer + length;

	while (buffer < end)
		sum = (u8) (sum + *(buffer++));

	return sum;
}

/*
 * Parse and handle the PALO table, which is published at:
 * http://www.dig64.org/home/DIG64_PALO_R1_0.pdf
 */
static void __init handle_palo(unsigned long phys_addr)
{
	struct palo_table *palo = __va(phys_addr);
	u8 checksum;

	if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) {
		printk(KERN_INFO "PALO signature incorrect.\n");
		return;
	}

	checksum = palo_checksum((u8 *)palo, palo->length);
	if (checksum) {
		printk(KERN_INFO "PALO checksum incorrect.\n");
		return;
	}

	setup_ptcg_sem(palo->max_tlb_purges, NPTCG_FROM_PALO);
}

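/*
 * Pin the granule containing the PAL code with a translation register
 * (IA64_TR_PALCODE) so that PAL can be called safely in virtual mode.
 */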
void
efi_map_pal_code (void)
{
	void *pal_vaddr = efi_get_pal_addr ();
	u64 psr;

	if (!pal_vaddr)
		return;

	/*
	 * Cannot write to CRx with PSR.ic=1
	 */
	psr = ia64_clear_ic();
	ia64_itr(0x1, IA64_TR_PALCODE,
		 GRANULEROUNDDOWN((unsigned long) pal_vaddr),
		 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
		 IA64_GRANULE_SHIFT);
	ia64_set_psr(psr);		/* restore psr */
}

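/*
 * Early EFI setup: parse the mem=/max_addr=/min_addr= boot options,
 * locate and verify the EFI system table, parse the architectural
 * configuration tables, install the physical-mode runtime service
 * wrappers, then map PAL code and switch EFI into virtual mode.
 */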
void __init
efi_init (void)
{
	const efi_system_table_t *efi_systab;
	void *efi_map_start, *efi_map_end;
	u64 efi_desc_size;
	char *cp;

	set_bit(EFI_BOOT, &efi.flags);
	set_bit(EFI_64BIT, &efi.flags);

	/*
	 * It's too early to be able to use the standard kernel command line
	 * support...
	 */
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			mem_limit = memparse(cp + 4, &cp);
		} else if (memcmp(cp, "max_addr=", 9) == 0) {
			max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
		} else if (memcmp(cp, "min_addr=", 9) == 0) {
			min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}
	if (min_addr != 0UL)
		printk(KERN_INFO "Ignoring memory below %lluMB\n",
		       min_addr >> 20);
	if (max_addr != ~0UL)
		printk(KERN_INFO "Ignoring memory above %lluMB\n",
		       max_addr >> 20);

	efi_systab = __va(ia64_boot_param->efi_systab);

	/*
	 * Verify the EFI Table
	 */
	if (efi_systab == NULL)
		panic("Whoa! Can't find EFI system table.\n");
	if (efi_systab_check_header(&efi_systab->hdr, 1))
		panic("Whoa! EFI system table signature incorrect\n");

	efi_systab_report_header(&efi_systab->hdr, efi_systab->fw_vendor);

	palo_phys = EFI_INVALID_TABLE_ADDR;

	if (efi_config_parse_tables(__va(efi_systab->tables),
				    efi_systab->nr_tables,
				    arch_tables) != 0)
		return;

	if (palo_phys != EFI_INVALID_TABLE_ADDR)
		handle_palo(palo_phys);

	runtime = __va(efi_systab->runtime);
	efi.get_time = phys_get_time;
	efi.set_time = phys_set_time;
	efi.get_wakeup_time = phys_get_wakeup_time;
	efi.set_wakeup_time = phys_set_wakeup_time;
	efi.get_variable = phys_get_variable;
	efi.get_next_variable = phys_get_next_variable;
	efi.set_variable = phys_set_variable;
	efi.get_next_high_mono_count = phys_get_next_high_mono_count;
	efi.reset_system = phys_reset_system;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

#if EFI_DEBUG
	/* print EFI memory map: */
	{
		efi_memory_desc_t *md;
		void *p;
		unsigned int i;

		for (i = 0, p = efi_map_start; p < efi_map_end;
		     ++i, p += efi_desc_size)
		{
			const char *unit;
			unsigned long size;
			char buf[64];

			md = p;
			size = md->num_pages << EFI_PAGE_SHIFT;

			if ((size >> 40) > 0) {
				size >>= 40;
				unit = "TB";
			} else if ((size >> 30) > 0) {
				size >>= 30;
				unit = "GB";
			} else if ((size >> 20) > 0) {
				size >>= 20;
				unit = "MB";
			} else {
				size >>= 10;
				unit = "KB";
			}

			printk("mem%02d: %s "
			       "range=[0x%016llx-0x%016llx) (%4lu%s)\n",
			       i, efi_md_typeattr_format(buf, sizeof(buf), md),
			       md->phys_addr,
			       md->phys_addr + efi_md_size(md), size, unit);
		}
	}
#endif

	efi_map_pal_code();
	efi_enter_virtual_mode();
}

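/*
 * Assign a kernel virtual address to every EFI_MEMORY_RUNTIME descriptor
 * (identity mapping for WB memory, ioremap otherwise), hand the updated
 * map to the firmware via SetVirtualAddressMap(), and on success switch
 * the efi.* entry points over to the virtual-mode stubs.
 */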
void
efi_enter_virtual_mode (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	efi_status_t status;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->attribute & EFI_MEMORY_RUNTIME) {
			/*
			 * Some descriptors have multiple bits set, so the
			 * order of the tests is relevant.
			 */
			if (md->attribute & EFI_MEMORY_WB) {
				md->virt_addr = (u64) __va(md->phys_addr);
			} else if (md->attribute & EFI_MEMORY_UC) {
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
			} else if (md->attribute & EFI_MEMORY_WC) {
#if 0
				md->virt_addr = ia64_remap(md->phys_addr,
							   (_PAGE_A |
							    _PAGE_P |
							    _PAGE_D |
							    _PAGE_MA_WC |
							    _PAGE_PL_0 |
							    _PAGE_AR_RW));
#else
				printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
			} else if (md->attribute & EFI_MEMORY_WT) {
#if 0
				md->virt_addr = ia64_remap(md->phys_addr,
							   (_PAGE_A |
							    _PAGE_P |
							    _PAGE_D |
							    _PAGE_MA_WT |
							    _PAGE_PL_0 |
							    _PAGE_AR_RW));
#else
				printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
			}
		}
	}

	status = efi_call_phys(__va(runtime->set_virtual_address_map),
			       ia64_boot_param->efi_memmap_size,
			       efi_desc_size,
			       ia64_boot_param->efi_memdesc_version,
			       ia64_boot_param->efi_memmap);
	if (status != EFI_SUCCESS) {
		printk(KERN_WARNING "warning: unable to switch EFI into "
		       "virtual mode (status=%lu)\n", status);
		return;
	}

	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);

	/*
	 * Now that EFI is in virtual mode, we call the EFI functions more
	 * efficiently:
	 */
	efi.get_time = virt_get_time;
	efi.set_time = virt_set_time;
	efi.get_wakeup_time = virt_get_wakeup_time;
	efi.set_wakeup_time = virt_set_wakeup_time;
	efi.get_variable = virt_get_variable;
	efi.get_next_variable = virt_get_next_variable;
	efi.set_variable = virt_set_variable;
	efi.get_next_high_mono_count = virt_get_next_high_mono_count;
	efi.reset_system = virt_reset_system;
}

/*
 * Walk the EFI memory map looking for the I/O port range.  There can only
 * be one entry of this type; other I/O port ranges should be described
 * via ACPI.
 */
u64
efi_get_iobase (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
			if (md->attribute & EFI_MEMORY_UC)
				return md->phys_addr;
		}
	}
	return 0;
}

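/*
 * Find the kern_memmap entry that contains phys_addr, or NULL if the
 * address is not covered by the kernel memory map.
 */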
static struct kern_memdesc *
kern_memory_descriptor (unsigned long phys_addr)
{
	struct kern_memdesc *md;

	for (md = kern_memmap; md->start != ~0UL; md++) {
		if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
			return md;
	}
	return NULL;
}

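/*
 * Find the EFI memory descriptor that contains phys_addr, or NULL if the
 * address is not covered by the EFI memory map.
 */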
static efi_memory_desc_t *
efi_memory_descriptor (unsigned long phys_addr)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		if (phys_addr - md->phys_addr < efi_md_size(md))
			return md;
	}
	return NULL;
}

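/*
 * Return 1 if the range [phys_addr, phys_addr + size) overlaps any
 * descriptor in the EFI memory map, 0 otherwise.
 */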
static int
efi_memmap_intersects (unsigned long phys_addr, unsigned long size)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	unsigned long end;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	end = phys_addr + size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->phys_addr < end && efi_md_end(md) > phys_addr)
			return 1;
	}
	return 0;
}

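/*
 * Return the EFI memory type of the descriptor covering phys_addr, or
 * -EINVAL if the address is not described by the EFI memory map.
 */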
int
efi_mem_type (unsigned long phys_addr)
{
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

	if (md)
		return md->type;
	return -EINVAL;
}

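/*
 * Return the raw attribute bits of the descriptor covering phys_addr,
 * or 0 if the address is not described by the EFI memory map.
 */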
u64
efi_mem_attributes (unsigned long phys_addr)
{
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

	if (md)
		return md->attribute;
	return 0;
}
EXPORT_SYMBOL(efi_mem_attributes);

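/*
 * Return the attributes (minus EFI_MEMORY_RUNTIME) that apply to the
 * entire range [phys_addr, phys_addr + size), walking across contiguous
 * EFI descriptors; return 0 if the range is not fully covered or the
 * attributes are not uniform.
 */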
u64
efi_mem_attribute (unsigned long phys_addr, unsigned long size)
{
	unsigned long end = phys_addr + size;
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
	u64 attr;

	if (!md)
		return 0;

	/*
	 * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells
	 * the kernel that firmware needs this region mapped.
	 */
	attr = md->attribute & ~EFI_MEMORY_RUNTIME;
	do {
		unsigned long md_end = efi_md_end(md);

		if (end <= md_end)
			return attr;

		md = efi_memory_descriptor(md_end);
		if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)
			return 0;
	} while (md);
	return 0;	/* never reached */
}

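/*
 * Same as efi_mem_attribute(), but consults the kernel's own kern_memmap
 * instead of the raw EFI map.  Before kern_memmap has been set up, fall
 * back to the EFI map and report at most EFI_MEMORY_WB.
 */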
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) u64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) kern_mem_attribute (unsigned long phys_addr, unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) unsigned long end = phys_addr + size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) struct kern_memdesc *md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) u64 attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * This is a hack for ioremap calls before we set up kern_memmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * Maybe we should do efi_memmap_init() earlier instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (!kern_memmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) attr = efi_mem_attribute(phys_addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (attr & EFI_MEMORY_WB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) return EFI_MEMORY_WB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) md = kern_memory_descriptor(phys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (!md)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) attr = md->attribute;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) unsigned long md_end = kmd_end(md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (end <= md_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) return attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) md = kern_memory_descriptor(md_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (!md || md->attribute != attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) } while (md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return 0; /* never reached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
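/*
 * Tell /dev/mem whether read()/write() access to this physical range
 * is safe.  Only ranges that kern_memmap describes as WB or UC are
 * accepted.
 */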
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) valid_phys_addr_range (phys_addr_t phys_addr, unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) u64 attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * /dev/mem reads and writes use copy_to_user(), which implicitly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * uses a granule-sized kernel identity mapping. It's really
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * only safe to do this for regions in kern_memmap. For more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * details, see Documentation/ia64/aliasing.rst.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) attr = kern_mem_attribute(phys_addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
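/*
 * Tell /dev/mem whether an mmap() of this pfn range may proceed; the
 * comments below explain the individual cases.
 */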
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) unsigned long phys_addr = pfn << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) u64 attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) attr = efi_mem_attribute(phys_addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * /dev/mem mmap uses normal user pages, so we don't need the entire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * granule, but the entire region we're mapping must support the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * attribute.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * Intel firmware doesn't tell us about all the MMIO regions, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * in general we have to allow mmap requests. But if EFI *does*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * tell us about anything inside this region, we should deny it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * The user can always map a smaller region to avoid the overlap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (efi_memmap_intersects(phys_addr, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
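/*
 * Pick the page protection for a /dev/mem style mapping: follow the
 * kernel mapping attribute when the range is in kern_memmap, otherwise
 * prefer WB when the EFI map allows it and fall back to UC.  Roughly,
 * the /dev/mem mmap path does:
 *
 *	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
 *						 size, vma->vm_page_prot);
 */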
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) pgprot_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) pgprot_t vma_prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) unsigned long phys_addr = pfn << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) u64 attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * For /dev/mem mmap, we use user mappings, but if the region is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * in kern_memmap (and hence may be covered by a kernel mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * we must use the same attribute as the kernel mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) attr = kern_mem_attribute(phys_addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (attr & EFI_MEMORY_WB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return pgprot_cacheable(vma_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) else if (attr & EFI_MEMORY_UC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return pgprot_noncached(vma_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * Some chipsets don't support UC access to memory. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * WB is supported, we prefer that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return pgprot_cacheable(vma_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) return pgprot_noncached(vma_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
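/*
 * Parse the EFI "ConOut" variable and return 1 if every console
 * output device path ends at a UART node, i.e. the firmware console
 * is serial only.  Return 0 if any path lacks a UART node, if the
 * variable is missing, or if the device path data is malformed.
 */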
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) efi_uart_console_only(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) efi_status_t status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) char *s, name[] = "ConOut";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) efi_char16_t *utf16, name_utf16[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) unsigned char data[1024];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) unsigned long size = sizeof(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) struct efi_generic_dev_path *hdr, *end_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) int uart = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /* Convert to UTF-16 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) utf16 = name_utf16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) s = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) while (*s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) *utf16++ = *s++ & 0x7f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) *utf16 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (status != EFI_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) printk(KERN_ERR "No EFI %s variable?\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) hdr = (struct efi_generic_dev_path *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) while (hdr < end_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (hdr->type == EFI_DEV_MSG &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) hdr->sub_type == EFI_DEV_MSG_UART)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) uart = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) else if (hdr->type == EFI_DEV_END_PATH ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) hdr->type == EFI_DEV_END_PATH2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (!uart)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (hdr->sub_type == EFI_DEV_END_ENTIRE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) uart = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) hdr = (struct efi_generic_dev_path *)((u8 *) hdr + hdr->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) printk(KERN_ERR "Malformed %s value\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
/*
 * Look for the first granule-aligned memory descriptor that is big
 * enough to hold the EFI memory map.  Make sure this descriptor is
 * at least granule sized so it does not get trimmed away.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) struct kern_memdesc *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) find_memmap_space (void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) u64 contig_low=0, contig_high=0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) u64 as = 0, ae;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) void *efi_map_start, *efi_map_end, *p, *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) efi_memory_desc_t *md, *pmd = NULL, *check_md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) u64 space_needed, efi_desc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) unsigned long total_mem = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) efi_map_start = __va(ia64_boot_param->efi_memmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) efi_desc_size = ia64_boot_param->efi_memdesc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * Worst case: we need 3 kernel descriptors for each efi descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * (if every entry has a WB part in the middle, and UC head and tail),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * plus one for the end marker.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) space_needed = sizeof(kern_memdesc_t) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) (3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) md = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (!efi_wb(md)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (pmd == NULL || !efi_wb(pmd) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) efi_md_end(pmd) != md->phys_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) contig_low = GRANULEROUNDUP(md->phys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) contig_high = efi_md_end(md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) for (q = p + efi_desc_size; q < efi_map_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) q += efi_desc_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) check_md = q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (!efi_wb(check_md))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (contig_high != check_md->phys_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) contig_high = efi_md_end(check_md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) contig_high = GRANULEROUNDDOWN(contig_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) /* Round ends inward to granule boundaries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) as = max(contig_low, md->phys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) ae = min(contig_high, efi_md_end(md));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
		/* keep within max_addr= and min_addr= command line args */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) as = max(as, min_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) ae = min(ae, max_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (ae <= as)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /* avoid going over mem= command line arg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (total_mem + (ae - as) > mem_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) ae -= total_mem + (ae - as) - mem_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (ae <= as)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (ae - as > space_needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (p >= efi_map_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) panic("Can't allocate space for kernel memory descriptors");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return __va(as);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
/*
 * Walk the EFI memory map and gather all memory available for the
 * kernel to use.  Partial granules can be allocated only if the
 * unavailable parts exist and are WB.
 */
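/*
 * The resulting kern_memdesc array is written into the space found by
 * find_memmap_space(): WB ranges are rounded inward to granule
 * boundaries and merged when adjacent, while UC trimmings and UC-only
 * conventional/boot-services memory are recorded as EFI_MEMORY_UC
 * entries for the uncached allocator.  The array is terminated by an
 * entry with start == ~0UL, *s and *e delimit the memory holding it so
 * the caller can reserve it, and the return value is the total WB
 * memory granted to the kernel (honoring the min_addr=, max_addr= and
 * mem= limits).
 */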
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) efi_memmap_init(u64 *s, u64 *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) struct kern_memdesc *k, *prev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) u64 contig_low=0, contig_high=0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) u64 as, ae, lim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) void *efi_map_start, *efi_map_end, *p, *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) efi_memory_desc_t *md, *pmd = NULL, *check_md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) u64 efi_desc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) unsigned long total_mem = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) k = kern_memmap = find_memmap_space();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) efi_map_start = __va(ia64_boot_param->efi_memmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) efi_desc_size = ia64_boot_param->efi_memdesc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) md = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (!efi_wb(md)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (efi_uc(md) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) (md->type == EFI_CONVENTIONAL_MEMORY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) md->type == EFI_BOOT_SERVICES_DATA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) k->attribute = EFI_MEMORY_UC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) k->start = md->phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) k->num_pages = md->num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) k++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (pmd == NULL || !efi_wb(pmd) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) efi_md_end(pmd) != md->phys_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) contig_low = GRANULEROUNDUP(md->phys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) contig_high = efi_md_end(md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) for (q = p + efi_desc_size; q < efi_map_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) q += efi_desc_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) check_md = q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (!efi_wb(check_md))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (contig_high != check_md->phys_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) contig_high = efi_md_end(check_md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) contig_high = GRANULEROUNDDOWN(contig_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (!is_memory_available(md))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * Round ends inward to granule boundaries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * Give trimmings to uncached allocator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (md->phys_addr < contig_low) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) lim = min(efi_md_end(md), contig_low);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (efi_uc(md)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (k > kern_memmap &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) (k-1)->attribute == EFI_MEMORY_UC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) kmd_end(k-1) == md->phys_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) (k-1)->num_pages +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) (lim - md->phys_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) >> EFI_PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) k->attribute = EFI_MEMORY_UC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) k->start = md->phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) k->num_pages = (lim - md->phys_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) >> EFI_PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) k++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) as = contig_low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) as = md->phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (efi_md_end(md) > contig_high) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) lim = max(md->phys_addr, contig_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (efi_uc(md)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (lim == md->phys_addr && k > kern_memmap &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) (k-1)->attribute == EFI_MEMORY_UC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) kmd_end(k-1) == md->phys_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) (k-1)->num_pages += md->num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) k->attribute = EFI_MEMORY_UC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) k->start = lim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) k->num_pages = (efi_md_end(md) - lim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) >> EFI_PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) k++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) ae = contig_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) ae = efi_md_end(md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
		/* keep within max_addr= and min_addr= command line args */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) as = max(as, min_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) ae = min(ae, max_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (ae <= as)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) /* avoid going over mem= command line arg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (total_mem + (ae - as) > mem_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) ae -= total_mem + (ae - as) - mem_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) if (ae <= as)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (prev && kmd_end(prev) == md->phys_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) total_mem += ae - as;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) k->attribute = EFI_MEMORY_WB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) k->start = as;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) total_mem += ae - as;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) prev = k++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) k->start = ~0L; /* end-marker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) /* reserve the memory we are using for kern_memmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) *s = (u64)kern_memmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) *e = (u64)++k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return total_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
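/*
 * Register an iomem resource for every descriptor in the EFI memory
 * map (MMIO ranges are skipped), naming each region by its type and
 * attributes, and nest the kernel code/data/bss resources (plus the
 * kexec and crash kernel ranges when configured) inside whichever
 * regions contain them.
 */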
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) efi_initialize_iomem_resources(struct resource *code_resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) struct resource *data_resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) struct resource *bss_resource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) void *efi_map_start, *efi_map_end, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) efi_memory_desc_t *md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) u64 efi_desc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) unsigned long flags, desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) efi_map_start = __va(ia64_boot_param->efi_memmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) efi_desc_size = ia64_boot_param->efi_memdesc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) res = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) md = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (md->num_pages == 0) /* should not happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) flags = IORESOURCE_MEM | IORESOURCE_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) desc = IORES_DESC_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) switch (md->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) case EFI_MEMORY_MAPPED_IO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) case EFI_LOADER_CODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) case EFI_LOADER_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) case EFI_BOOT_SERVICES_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) case EFI_BOOT_SERVICES_CODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) case EFI_CONVENTIONAL_MEMORY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (md->attribute & EFI_MEMORY_WP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) name = "System ROM";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) flags |= IORESOURCE_READONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) } else if (md->attribute == EFI_MEMORY_UC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) name = "Uncached RAM";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) name = "System RAM";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) flags |= IORESOURCE_SYSRAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) case EFI_ACPI_MEMORY_NVS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) name = "ACPI Non-volatile Storage";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) desc = IORES_DESC_ACPI_NV_STORAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) case EFI_UNUSABLE_MEMORY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) name = "reserved";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) flags |= IORESOURCE_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) case EFI_PERSISTENT_MEMORY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) name = "Persistent Memory";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) desc = IORES_DESC_PERSISTENT_MEMORY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) case EFI_RESERVED_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) case EFI_RUNTIME_SERVICES_CODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) case EFI_RUNTIME_SERVICES_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) case EFI_ACPI_RECLAIM_MEMORY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) name = "reserved";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) printk(KERN_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) "failed to allocate resource for iomem\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) res->name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) res->start = md->phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) res->end = md->phys_addr + efi_md_size(md) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) res->flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) res->desc = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (insert_resource(&iomem_resource, res) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) kfree(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) * We don't know which region contains
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) * kernel data so we try it repeatedly and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) * let the resource manager test it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) insert_resource(res, code_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) insert_resource(res, data_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) insert_resource(res, bss_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) #ifdef CONFIG_KEXEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) insert_resource(res, &efi_memmap_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) insert_resource(res, &boot_param_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (crashk_res.end > crashk_res.start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) insert_resource(res, &crashk_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) #ifdef CONFIG_KEXEC
/*
 * Find a block of memory aligned to 64M, excluding the reserved
 * regions; rsvd_regions are sorted.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) unsigned long __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) kdump_find_rsvd_region (unsigned long size, struct rsvd_region *r, int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) u64 start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) u64 alignment = 1UL << _PAGE_SIZE_64M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) void *efi_map_start, *efi_map_end, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) efi_memory_desc_t *md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) u64 efi_desc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) efi_map_start = __va(ia64_boot_param->efi_memmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) efi_desc_size = ia64_boot_param->efi_memdesc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) md = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (!efi_wb(md))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) start = ALIGN(md->phys_addr, alignment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) end = efi_md_end(md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (__pa(r[i].start) > start + size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) start = ALIGN(__pa(r[i].end), alignment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (i < n-1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) __pa(r[i+1].start) < start + size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (end > start + size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) return start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) printk(KERN_WARNING
	       "Cannot reserve 0x%lx bytes of memory for crashdump\n", size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) return ~0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) #ifdef CONFIG_CRASH_DUMP
/* Locate the size of the EFI_LOADER_DATA descriptor at a given address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) unsigned long __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) vmcore_find_descriptor_size (unsigned long address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) void *efi_map_start, *efi_map_end, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) efi_memory_desc_t *md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) u64 efi_desc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) unsigned long ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) efi_map_start = __va(ia64_boot_param->efi_memmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) efi_desc_size = ia64_boot_param->efi_memdesc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) md = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (efi_wb(md) && md->type == EFI_LOADER_DATA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) && md->phys_addr == address) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) ret = efi_md_size(md);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
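/*
 * Append the ia64-specific MPS and HCDP table addresses, when present,
 * to the buffer; the generic EFI code is expected to call this when
 * generating the systab sysfs file.
 */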
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) char *efi_systab_show_arch(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (mps_phys != EFI_INVALID_TABLE_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) str += sprintf(str, "MPS=0x%lx\n", mps_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (hcdp_phys != EFI_INVALID_TABLE_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) str += sprintf(str, "HCDP=0x%lx\n", hcdp_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) return str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }