// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * acpi.c - Architecture-Specific Low-Level ACPI Support
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2000, 2002-2003 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000 Intel Corp.
 * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
 * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
 * Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
 * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
 * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <linux/slab.h>
#include <acpi/processor.h>
#include <asm/io.h>
#include <asm/iosapic.h>
#include <asm/page.h>
#include <asm/numa.h>
#include <asm/sal.h>
#include <asm/cyclone.h>

#define PREFIX "ACPI: "

int acpi_lapic;
unsigned int acpi_cpei_override;
unsigned int acpi_cpei_phys_cpuid;

#define ACPI_MAX_PLATFORM_INTERRUPTS 256

/* Array to record platform interrupt vectors for generic interrupt routing. */
int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = {
        [0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1
};

enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;

/*
 * Interrupt routing API for device drivers.  Provides interrupt vector for
 * a generic platform event.  Currently only CPEI is implemented.
 */
int acpi_request_vector(u32 int_type)
{
        int vector = -1;

        if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) {
                /* corrected platform error interrupt */
                vector = platform_intr_list[int_type];
        } else
                printk(KERN_ERR
                       "acpi_request_vector(): invalid interrupt type\n");
        return vector;
}
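
/*
 * Informal usage sketch (not part of this file): a caller such as the
 * IA-64 MCA code would ask for the CPEI vector by its platform
 * interrupt type and treat -1 as "not routed".  The interrupt type
 * constant, handler name and flags below are assumptions used only for
 * illustration.
 *
 *        int cpei_irq = acpi_request_vector(ACPI_INTERRUPT_CPEI);
 *
 *        if (cpei_irq >= 0)
 *                request_irq(cpei_irq, my_cpei_handler, 0, "cpei", NULL);
 */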

void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
{
        return __va(phys);
}

void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
}

/* --------------------------------------------------------------------------
                           Boot-time Table Parsing
   -------------------------------------------------------------------------- */

static int available_cpus __initdata;
struct acpi_table_madt *acpi_madt __initdata;
static u8 has_8259;

static int __init
acpi_parse_lapic_addr_ovr(union acpi_subtable_headers *header,
                          const unsigned long end)
{
        struct acpi_madt_local_apic_override *lapic;

        lapic = (struct acpi_madt_local_apic_override *)header;

        if (BAD_MADT_ENTRY(lapic, end))
                return -EINVAL;

        if (lapic->address) {
                iounmap(ipi_base_addr);
                ipi_base_addr = ioremap(lapic->address, 0);
        }
        return 0;
}

static int __init
acpi_parse_lsapic(union acpi_subtable_headers *header, const unsigned long end)
{
        struct acpi_madt_local_sapic *lsapic;

        lsapic = (struct acpi_madt_local_sapic *)header;

        /* Skip BAD_MADT_ENTRY check, as lsapic size could vary */

        if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
#ifdef CONFIG_SMP
                smp_boot_data.cpu_phys_id[available_cpus] =
                    (lsapic->id << 8) | lsapic->eid;
#endif
                ++available_cpus;
        }

        total_cpus++;
        return 0;
}

static int __init
acpi_parse_lapic_nmi(union acpi_subtable_headers *header, const unsigned long end)
{
        struct acpi_madt_local_apic_nmi *lacpi_nmi;

        lacpi_nmi = (struct acpi_madt_local_apic_nmi *)header;

        if (BAD_MADT_ENTRY(lacpi_nmi, end))
                return -EINVAL;

        /* TBD: Support lapic_nmi entries */
        return 0;
}

static int __init
acpi_parse_iosapic(union acpi_subtable_headers *header, const unsigned long end)
{
        struct acpi_madt_io_sapic *iosapic;

        iosapic = (struct acpi_madt_io_sapic *)header;

        if (BAD_MADT_ENTRY(iosapic, end))
                return -EINVAL;

        return iosapic_init(iosapic->address, iosapic->global_irq_base);
}

static unsigned int __initdata acpi_madt_rev;

static int __init
acpi_parse_plat_int_src(union acpi_subtable_headers *header,
                        const unsigned long end)
{
        struct acpi_madt_interrupt_source *plintsrc;
        int vector;

        plintsrc = (struct acpi_madt_interrupt_source *)header;

        if (BAD_MADT_ENTRY(plintsrc, end))
                return -EINVAL;

        /*
         * Get vector assignment for this interrupt, set attributes,
         * and program the IOSAPIC routing table.
         */
        vector = iosapic_register_platform_intr(plintsrc->type,
                                                plintsrc->global_irq,
                                                plintsrc->io_sapic_vector,
                                                plintsrc->eid,
                                                plintsrc->id,
                                                ((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) ==
                                                 ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
                                                IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
                                                ((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
                                                 ACPI_MADT_TRIGGER_EDGE) ?
                                                IOSAPIC_EDGE : IOSAPIC_LEVEL);

        platform_intr_list[plintsrc->type] = vector;
        if (acpi_madt_rev > 1) {
                acpi_cpei_override = plintsrc->flags & ACPI_MADT_CPEI_OVERRIDE;
        }

        /*
         * Save the physical id, so we can check when it's being removed
         */
        acpi_cpei_phys_cpuid = ((plintsrc->id << 8) | (plintsrc->eid)) & 0xffff;

        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
unsigned int can_cpei_retarget(void)
{
        extern int cpe_vector;
        extern unsigned int force_cpei_retarget;

        /*
         * CPEI can be re-targeted only if it is supported and the
         * override flag is present; otherwise report that it is
         * re-targetable when we are in polling mode.
         */
        if (cpe_vector > 0) {
                if (acpi_cpei_override || force_cpei_retarget)
                        return 1;
                else
                        return 0;
        }
        return 1;
}

unsigned int is_cpu_cpei_target(unsigned int cpu)
{
        unsigned int logical_id;

        logical_id = cpu_logical_id(acpi_cpei_phys_cpuid);

        if (logical_id == cpu)
                return 1;
        else
                return 0;
}

void set_cpei_target_cpu(unsigned int cpu)
{
        acpi_cpei_phys_cpuid = cpu_physical_id(cpu);
}
#endif

unsigned int get_cpei_target_cpu(void)
{
        return acpi_cpei_phys_cpuid;
}
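
/*
 * Hedged sketch of how a CPU hotplug path could consult the helpers
 * above before taking a CPU down (the wrapper below is hypothetical,
 * not part of this file): refuse to offline a CPU that is the current
 * CPEI target when the interrupt cannot be re-targeted elsewhere.
 *
 *        static int cpu_can_go_offline(unsigned int cpu)
 *        {
 *                if (is_cpu_cpei_target(cpu) && !can_cpei_retarget())
 *                        return 0;
 *                return 1;
 *        }
 */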

static int __init
acpi_parse_int_src_ovr(union acpi_subtable_headers *header,
                       const unsigned long end)
{
        struct acpi_madt_interrupt_override *p;

        p = (struct acpi_madt_interrupt_override *)header;

        if (BAD_MADT_ENTRY(p, end))
                return -EINVAL;

        iosapic_override_isa_irq(p->source_irq, p->global_irq,
                                 ((p->inti_flags & ACPI_MADT_POLARITY_MASK) ==
                                  ACPI_MADT_POLARITY_ACTIVE_LOW) ?
                                 IOSAPIC_POL_LOW : IOSAPIC_POL_HIGH,
                                 ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
                                  ACPI_MADT_TRIGGER_LEVEL) ?
                                 IOSAPIC_LEVEL : IOSAPIC_EDGE);
        return 0;
}

static int __init
acpi_parse_nmi_src(union acpi_subtable_headers *header, const unsigned long end)
{
        struct acpi_madt_nmi_source *nmi_src;

        nmi_src = (struct acpi_madt_nmi_source *)header;

        if (BAD_MADT_ENTRY(nmi_src, end))
                return -EINVAL;

        /* TBD: Support nmi_src entries */
        return 0;
}

static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERMOW", 6))) {

                /*
                 * Unfortunately ITC_DRIFT is not yet part of the
                 * official SAL spec, so the ITC_DRIFT bit is not
                 * set by the BIOS on this hardware.
                 */
                sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;

                cyclone_setup();
        }
}

static int __init acpi_parse_madt(struct acpi_table_header *table)
{
        acpi_madt = (struct acpi_table_madt *)table;

        acpi_madt_rev = acpi_madt->header.revision;

        /* remember the value for reference after free_initmem() */
#ifdef CONFIG_ITANIUM
        has_8259 = 1;   /* Firmware on old Itanium systems is broken */
#else
        has_8259 = acpi_madt->flags & ACPI_MADT_PCAT_COMPAT;
#endif
        iosapic_system_init(has_8259);

        /* Get base address of IPI Message Block */

        if (acpi_madt->address)
                ipi_base_addr = ioremap(acpi_madt->address, 0);

        printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);

        acpi_madt_oem_check(acpi_madt->header.oem_id,
                            acpi_madt->header.oem_table_id);

        return 0;
}

#ifdef CONFIG_ACPI_NUMA

#undef SLIT_DEBUG

#define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)

static int __initdata srat_num_cpus;    /* number of cpus */
static u32 pxm_flag[PXM_FLAG_LEN];
#define pxm_bit_set(bit)        (set_bit(bit,(void *)pxm_flag))
#define pxm_bit_test(bit)       (test_bit(bit,(void *)pxm_flag))
static struct acpi_table_slit __initdata *slit_table;
cpumask_t early_cpu_possible_map = CPU_MASK_NONE;

static int __init
get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
{
        int pxm;

        pxm = pa->proximity_domain_lo;
        if (acpi_srat_revision >= 2)
                pxm += pa->proximity_domain_hi[0] << 8;
        return pxm;
}

static int __init
get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
{
        int pxm;

        pxm = ma->proximity_domain;
        if (acpi_srat_revision <= 1)
                pxm &= 0xff;

        return pxm;
}
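
/*
 * Worked example for the two helpers above (values are illustrative):
 * an SRAT rev >= 2 processor entry with proximity_domain_lo = 0x34 and
 * proximity_domain_hi[0] = 0x12 yields pxm = 0x1234, while a rev 1
 * table uses only the low byte (0x34).  Memory affinity entries are
 * analogous: for revisions <= 1 the 32-bit proximity_domain field is
 * masked down to its low 8 bits.
 */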

/*
 * ACPI 2.0 SLIT (System Locality Information Table)
 * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
 */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
        u32 len;

        len = sizeof(struct acpi_table_header) + 8
            + slit->locality_count * slit->locality_count;
        if (slit->header.length != len) {
                printk(KERN_ERR
                       "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
                       len, slit->header.length);
                return;
        }
        slit_table = slit;
}
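
/*
 * Worked size check (informal): for a SLIT describing 4 localities the
 * expected length is 36 bytes of struct acpi_table_header + 8 bytes for
 * the 64-bit locality count + 4 * 4 one-byte matrix entries = 60, so a
 * table whose header.length disagrees is rejected above.
 */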

void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
        int pxm;

        if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
                return;

        if (srat_num_cpus >= ARRAY_SIZE(node_cpuid)) {
                printk_once(KERN_WARNING
                            "node_cpuid[%ld] is too small, may not be able to use all cpus\n",
                            ARRAY_SIZE(node_cpuid));
                return;
        }
        pxm = get_processor_proximity_domain(pa);

        /* record this node in proximity bitmap */
        pxm_bit_set(pxm);

        node_cpuid[srat_num_cpus].phys_id =
            (pa->apic_id << 8) | (pa->local_sapic_eid);
        /* nid should be overridden as logical node id later */
        node_cpuid[srat_num_cpus].nid = pxm;
        cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map);
        srat_num_cpus++;
}

int __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
        unsigned long paddr, size;
        int pxm;
        struct node_memblk_s *p, *q, *pend;

        pxm = get_memory_proximity_domain(ma);

        /* fill node memory chunk structure */
        paddr = ma->base_address;
        size = ma->length;

        /* Ignore disabled entries */
        if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
                return -1;

        if (num_node_memblks >= NR_NODE_MEMBLKS) {
                pr_err("NUMA: too many memblk ranges\n");
                return -EINVAL;
        }

        /* record this node in proximity bitmap */
        pxm_bit_set(pxm);

        /* Insertion sort based on base address */
        pend = &node_memblk[num_node_memblks];
        for (p = &node_memblk[0]; p < pend; p++) {
                if (paddr < p->start_paddr)
                        break;
        }
        if (p < pend) {
                for (q = pend - 1; q >= p; q--)
                        *(q + 1) = *q;
        }
        p->start_paddr = paddr;
        p->size = size;
        p->nid = pxm;
        num_node_memblks++;
        return 0;
}

void __init acpi_numa_fixup(void)
{
        int i, j, node_from, node_to;

        /* If there's no SRAT, fix the phys_id and mark node 0 online */
        if (srat_num_cpus == 0) {
                node_set_online(0);
                node_cpuid[0].phys_id = hard_smp_processor_id();
                slit_distance(0, 0) = LOCAL_DISTANCE;
                goto out;
        }

        /*
         * MCD - This can probably be dropped now.  No need for pxm ID to node ID
         * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
         */
        nodes_clear(node_online_map);
        for (i = 0; i < MAX_PXM_DOMAINS; i++) {
                if (pxm_bit_test(i)) {
                        int nid = acpi_map_pxm_to_node(i);
                        node_set_online(nid);
                }
        }

        /* set logical node id in memory chunk structure */
        for (i = 0; i < num_node_memblks; i++)
                node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);

        /* assign memory bank numbers for each chunk on each node */
        for_each_online_node(i) {
                int bank;

                bank = 0;
                for (j = 0; j < num_node_memblks; j++)
                        if (node_memblk[j].nid == i)
                                node_memblk[j].bank = bank++;
        }

        /* set logical node id in cpu structure */
        for_each_possible_early_cpu(i)
                node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);

        printk(KERN_INFO "Number of logical nodes in system = %d\n",
               num_online_nodes());
        printk(KERN_INFO "Number of memory chunks in system = %d\n",
               num_node_memblks);

        if (!slit_table) {
                for (i = 0; i < MAX_NUMNODES; i++)
                        for (j = 0; j < MAX_NUMNODES; j++)
                                slit_distance(i, j) = i == j ?
                                        LOCAL_DISTANCE : REMOTE_DISTANCE;
                goto out;
        }

        memset(numa_slit, -1, sizeof(numa_slit));
        for (i = 0; i < slit_table->locality_count; i++) {
                if (!pxm_bit_test(i))
                        continue;
                node_from = pxm_to_node(i);
                for (j = 0; j < slit_table->locality_count; j++) {
                        if (!pxm_bit_test(j))
                                continue;
                        node_to = pxm_to_node(j);
                        slit_distance(node_from, node_to) =
                            slit_table->entry[i * slit_table->locality_count + j];
                }
        }

#ifdef SLIT_DEBUG
        printk("ACPI 2.0 SLIT locality table:\n");
        for_each_online_node(i) {
                for_each_online_node(j)
                        printk("%03d ", node_distance(i, j));
                printk("\n");
        }
#endif
out:
        node_possible_map = node_online_map;
}
#endif /* CONFIG_ACPI_NUMA */

/*
 * success: return IRQ number (>=0)
 * failure: return < 0
 */
int acpi_register_gsi(struct device *dev, u32 gsi, int triggering, int polarity)
{
        if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
                return gsi;

        if (has_8259 && gsi < 16)
                return isa_irq_to_vector(gsi);

        return iosapic_register_intr(gsi,
                                     (polarity ==
                                      ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH :
                                     IOSAPIC_POL_LOW,
                                     (triggering ==
                                      ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
                                     IOSAPIC_LEVEL);
}
EXPORT_SYMBOL_GPL(acpi_register_gsi);
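
/*
 * Minimal usage sketch (assumptions: an active-low, level-triggered GSI
 * such as a typical PCI interrupt; variable names are illustrative):
 *
 *        int irq;
 *
 *        irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE,
 *                                ACPI_ACTIVE_LOW);
 *        if (irq < 0)
 *                return irq;
 *        ...
 *        acpi_unregister_gsi(gsi);
 */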

void acpi_unregister_gsi(u32 gsi)
{
        if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
                return;

        if (has_8259 && gsi < 16)
                return;

        iosapic_unregister_intr(gsi);
}
EXPORT_SYMBOL_GPL(acpi_unregister_gsi);

static int __init acpi_parse_fadt(struct acpi_table_header *table)
{
        struct acpi_table_header *fadt_header;
        struct acpi_table_fadt *fadt;

        fadt_header = (struct acpi_table_header *)table;
        if (fadt_header->revision != 3)
                return -ENODEV; /* Only deal with ACPI 2.0 FADT */

        fadt = (struct acpi_table_fadt *)fadt_header;

        acpi_register_gsi(NULL, fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE,
                          ACPI_ACTIVE_LOW);
        return 0;
}

int __init early_acpi_boot_init(void)
{
        int ret;

        /*
         * do a partial walk of MADT to determine how many CPUs
         * we have including offline CPUs
         */
        if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
                printk(KERN_ERR PREFIX "Can't find MADT\n");
                return 0;
        }

        ret = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
                                    acpi_parse_lsapic, NR_CPUS);
        if (ret < 1)
                printk(KERN_ERR PREFIX
                       "Error parsing MADT - no LAPIC entries\n");
        else
                acpi_lapic = 1;

#ifdef CONFIG_SMP
        if (available_cpus == 0) {
                printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
                printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
                smp_boot_data.cpu_phys_id[available_cpus] =
                    hard_smp_processor_id();
                available_cpus = 1;     /* We've got at least one of these, no? */
        }
        smp_boot_data.cpu_count = available_cpus;
#endif
        /* Make boot-up look pretty */
        printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
               total_cpus);

        return 0;
}

int __init acpi_boot_init(void)
{

        /*
         * MADT
         * ----
         * Parse the Multiple APIC Description Table (MADT), if it exists.
         * Note that this table provides platform SMP configuration
         * information -- the successor to MPS tables.
         */

        if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
                printk(KERN_ERR PREFIX "Can't find MADT\n");
                goto skip_madt;
        }

        /* Local APIC */

        if (acpi_table_parse_madt
            (ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0) < 0)
                printk(KERN_ERR PREFIX
                       "Error parsing LAPIC address override entry\n");

        if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
            < 0)
                printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");

        /* I/O APIC */

        if (acpi_table_parse_madt
            (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) {
                printk(KERN_ERR PREFIX
                       "Error parsing MADT - no IOSAPIC entries\n");
        }

        /* System-Level Interrupt Routing */

        if (acpi_table_parse_madt
            (ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src,
             ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
                printk(KERN_ERR PREFIX
                       "Error parsing platform interrupt source entry\n");

        if (acpi_table_parse_madt
            (ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0)
                printk(KERN_ERR PREFIX
                       "Error parsing interrupt source overrides entry\n");

        if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0)
                printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
      skip_madt:

        /*
         * FADT says whether a legacy keyboard controller is present.
         * The FADT also contains an SCI_INT line, by which the system
         * gets interrupts such as power and sleep buttons.  If it's not
         * on a legacy interrupt, it needs to be set up.
         */
        if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
                printk(KERN_ERR PREFIX "Can't find FADT\n");

#ifdef CONFIG_ACPI_NUMA
#ifdef CONFIG_SMP
        if (srat_num_cpus == 0) {
                int cpu, i = 1;
                for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
                        if (smp_boot_data.cpu_phys_id[cpu] !=
                            hard_smp_processor_id())
                                node_cpuid[i++].phys_id =
                                    smp_boot_data.cpu_phys_id[cpu];
        }
#endif
        build_cpu_to_node_map();
#endif
        return 0;
}

int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
        int tmp;

        if (has_8259 && gsi < 16)
                *irq = isa_irq_to_vector(gsi);
        else {
                tmp = gsi_to_irq(gsi);
                if (tmp == -1)
                        return -1;
                *irq = tmp;
        }
        return 0;
}

int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
{
        if (isa_irq >= 16)
                return -1;
        *gsi = isa_irq;
        return 0;
}
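
/*
 * Informal example for the two translation helpers above (the ISA IRQ
 * number is arbitrary): map an ISA IRQ to its GSI and then to the Linux
 * IRQ the kernel would actually use.
 *
 *        unsigned int irq;
 *        u32 gsi;
 *
 *        if (acpi_isa_irq_to_gsi(4, &gsi) == 0 &&
 *            acpi_gsi_to_irq(gsi, &irq) == 0)
 *                pr_info("ISA IRQ 4 -> GSI %u -> Linux IRQ %u\n", gsi, irq);
 */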

/*
 * ACPI based hotplug CPU support
 */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
        /*
         * We don't have cpu-only-node hotadd.  But if the system provides
         * an SRAT table, the pxm has already been found and the node is
         * ready, so pxm_to_nid(pxm) would be enough.
         * This code is for systems that don't have a full SRAT table
         * covering all possible cpus.
         */
        node_cpuid[cpu].phys_id = physid;
        node_cpuid[cpu].nid = acpi_get_node(handle);
#endif
        return 0;
}

int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
        if (s)
                additional_cpus = simple_strtol(s, NULL, 0);

        return 0;
}

early_param("additional_cpus", setup_additional_cpus);

/*
 * cpu_possible_mask should be static: it cannot change as CPUs
 * are onlined or offlined.  The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect to
 * do this dynamically on cpu arrival/departure.
 * cpu_present_mask on the other hand can change dynamically.
 * In case cpu_hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
 * - The user can overwrite it with additional_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 */
__init void prefill_possible_map(void)
{
        int i;
        int possible, disabled_cpus;

        disabled_cpus = total_cpus - available_cpus;

        if (additional_cpus == -1) {
                if (disabled_cpus > 0)
                        additional_cpus = disabled_cpus;
                else
                        additional_cpus = 0;
        }

        possible = available_cpus + additional_cpus;

        if (possible > nr_cpu_ids)
                possible = nr_cpu_ids;

        printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
               possible, max((possible - available_cpus), 0));

        for (i = 0; i < possible; i++)
                set_cpu_possible(i, true);
}
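
/*
 * Worked example of the sizing above (numbers are illustrative): with 2
 * CPUs enabled in the MADT and 6 more listed but disabled, and no
 * additional_cpus= override, disabled_cpus = 6, additional_cpus becomes
 * 6 and possible = 8 (clamped to nr_cpu_ids), so 6 hotplug CPUs are
 * reported.
 */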

static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
{
        cpumask_t tmp_map;
        int cpu;

        cpumask_complement(&tmp_map, cpu_present_mask);
        cpu = cpumask_first(&tmp_map);
        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        acpi_map_cpu2node(handle, cpu, physid);

        set_cpu_present(cpu, true);
        ia64_cpu_to_sapicid[cpu] = physid;

        acpi_processor_set_pdc(handle);

        *pcpu = cpu;
        return (0);
}

/* wrapper to silence section mismatch warning */
int __ref acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
                       int *pcpu)
{
        return _acpi_map_lsapic(handle, physid, pcpu);
}
EXPORT_SYMBOL(acpi_map_cpu);

int acpi_unmap_cpu(int cpu)
{
        ia64_cpu_to_sapicid[cpu] = -1;
        set_cpu_present(cpu, false);

#ifdef CONFIG_ACPI_NUMA
        /* NUMA specific cleanups */
#endif

        return (0);
}
EXPORT_SYMBOL(acpi_unmap_cpu);
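
/*
 * Hedged sketch of the hot-add/hot-remove flow these entry points serve
 * (simplified and abbreviated, not the literal caller's code; the usual
 * caller is the generic ACPI processor driver): a hot-added processor
 * object is mapped to a logical CPU before it is brought online, and
 * unmapped again after it has been taken down.
 *
 *        int cpu;
 *
 *        if (acpi_map_cpu(handle, physid, acpi_id, &cpu) == 0) {
 *                add_cpu(cpu);
 *                ...
 *                remove_cpu(cpu);
 *                acpi_unmap_cpu(cpu);
 *        }
 */
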
#endif /* CONFIG_ACPI_HOTPLUG_CPU */

#ifdef CONFIG_ACPI_NUMA
static acpi_status acpi_map_iosapic(acpi_handle handle, u32 depth,
                                    void *context, void **ret)
{
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
        struct acpi_madt_io_sapic *iosapic;
        unsigned int gsi_base;
        int node;

        /* Only care about objects w/ a method that returns the MADT */
        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
                return AE_OK;

        if (!buffer.length || !buffer.pointer)
                return AE_OK;

        obj = buffer.pointer;
        if (obj->type != ACPI_TYPE_BUFFER ||
            obj->buffer.length < sizeof(*iosapic)) {
                kfree(buffer.pointer);
                return AE_OK;
        }

        iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer;

        if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) {
                kfree(buffer.pointer);
                return AE_OK;
        }

        gsi_base = iosapic->global_irq_base;

        kfree(buffer.pointer);

        /* OK, it's an IOSAPIC MADT entry; associate it with a node */
        node = acpi_get_node(handle);
        if (node == NUMA_NO_NODE || !node_online(node) ||
            cpumask_empty(cpumask_of_node(node)))
                return AE_OK;

        /* We know a gsi to node mapping! */
        map_iosapic_to_node(gsi_base, node);
        return AE_OK;
}

static int __init
acpi_map_iosapics(void)
{
        acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL);
        return 0;
}

fs_initcall(acpi_map_iosapics);
#endif /* CONFIG_ACPI_NUMA */

int __ref acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
{
        int err;

        if ((err = iosapic_init(phys_addr, gsi_base)))
                return err;

#ifdef CONFIG_ACPI_NUMA
        acpi_map_iosapic(handle, 0, NULL, NULL);
#endif /* CONFIG_ACPI_NUMA */

        return 0;
}

EXPORT_SYMBOL(acpi_register_ioapic);

int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
{
        return iosapic_remove(gsi_base);
}

EXPORT_SYMBOL(acpi_unregister_ioapic);

/*
 * acpi_suspend_lowlevel() - save kernel state and suspend.
 *
 * TBD when IA64 starts to support suspend...
 */
int acpi_suspend_lowlevel(void) { return 0; }