// SPDX-License-Identifier: GPL-2.0-only
/*
 * Local APIC handling, local APIC timers
 *
 * (c) 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *
 * Fixes
 * Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *				thanks to Eric Gilmore
 *				and Rolf G. Tews
 *				for testing these extensively.
 * Maciej W. Rozycki	:	Various updates and fixes.
 * Mikael Pettersson	:	Power Management for UP-APIC.
 * Pavel Machek and
 * Mikael Pettersson	:	PM converted to driver model.
 */

#include <linux/perf_event.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/acpi_pmtmr.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/memblock.h>
#include <linux/ftrace.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/timex.h>
#include <linux/i8253.h>
#include <linux/dmar.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/dmi.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/trace/irq_vectors.h>
#include <asm/irq_remapping.h>
#include <asm/perf_event.h>
#include <asm/x86_init.h>
#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/mpspec.h>
#include <asm/i8259.h>
#include <asm/proto.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/acpi.h>
#include <asm/io_apic.h>
#include <asm/desc.h>
#include <asm/hpet.h>
#include <asm/mtrr.h>
#include <asm/time.h>
#include <asm/smp.h>
#include <asm/mce.h>
#include <asm/tsc.h>
#include <asm/hypervisor.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/irq_regs.h>

unsigned int num_processors;

unsigned disabled_cpus;

/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid __ro_after_init = -1U;
EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid);

u8 boot_cpu_apic_version __ro_after_init;

/*
 * The highest APIC ID seen during enumeration.
 */
static unsigned int max_physical_apicid;

/*
 * Bitmask of physically existing CPUs:
 */
physid_mask_t phys_cpu_present_map;

/*
 * Processor to be disabled, specified by the kernel parameter
 * disable_cpu_apicid=<int>; mostly used by the kdump second kernel to
 * avoid undefined behaviour caused by sending INIT from an AP to the BSP.
 */
static unsigned int disabled_cpu_apicid __ro_after_init = BAD_APICID;

/*
 * This variable controls which CPUs receive external NMIs. By default,
 * external NMIs are delivered only to the BSP.
 */
static int apic_extnmi __ro_after_init = APIC_EXTNMI_BSP;

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid, U32_MAX);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_acpiid);

#ifdef CONFIG_X86_32

/*
 * On x86_32, the mapping between cpu and logical apicid may vary
 * depending on the APIC in use. The following early percpu variable is
 * used for the mapping. This is where the behaviors of x86_64 and x86_32
 * actually diverge. Let's keep it ugly for now.
 */
DEFINE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid, BAD_APICID);

/* Local APIC was disabled by the BIOS and enabled by the kernel */
static int enabled_via_apicbase __ro_after_init;

/*
 * Handle interrupt mode configuration register (IMCR).
 * This register controls whether the interrupt signals
 * that reach the BSP come from the master PIC or from the
 * local APIC. Before entering Symmetric I/O Mode, either
 * the BIOS or the operating system must switch out of
 * PIC Mode by changing the IMCR.
 */
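/*
 * Usage sketch (an assumption drawn from the MP spec wording above, not a
 * statement about specific callers): boards that boot in PIC Mode leave
 * IMCR at 0x00, so the kernel calls imcr_pic_to_apic() before it starts
 * using the local APIC, and imcr_apic_to_pic() to restore the power-on
 * routing when the APIC is torn down again.
 */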
static inline void imcr_pic_to_apic(void)
{
	/* select IMCR register */
	outb(0x70, 0x22);
	/* NMI and 8259 INTR go through APIC */
	outb(0x01, 0x23);
}

static inline void imcr_apic_to_pic(void)
{
	/* select IMCR register */
	outb(0x70, 0x22);
	/* NMI and 8259 INTR go directly to BSP */
	outb(0x00, 0x23);
}
#endif

/*
 * Knob to control our willingness to enable the local APIC.
 *
 * +1=force-enable
 */
static int force_enable_local_apic __initdata;

/*
 * APIC command line parameters
 */
static int __init parse_lapic(char *arg)
{
	if (IS_ENABLED(CONFIG_X86_32) && !arg)
		force_enable_local_apic = 1;
	else if (arg && !strncmp(arg, "notscdeadline", 13))
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
	return 0;
}
early_param("lapic", parse_lapic);

#ifdef CONFIG_X86_64
static int apic_calibrate_pmtmr __initdata;
static __init int setup_apicpmtimer(char *s)
{
	apic_calibrate_pmtmr = 1;
	notsc_setup(NULL);
	return 0;
}
__setup("apicpmtimer", setup_apicpmtimer);
#endif

unsigned long mp_lapic_addr __ro_after_init;
int disable_apic __ro_after_init;
/* Disable the local APIC timer from the kernel command line or via a DMI quirk */
static int disable_apic_timer __initdata;
/* Local APIC timer works in C2 */
int local_apic_timer_c2_ok __ro_after_init;
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);

/*
 * Debug level, exported for io_apic.c
 */
int apic_verbosity __ro_after_init;

int pic_mode __ro_after_init;

/* Have we found an MP table */
int smp_found_config __ro_after_init;

static struct resource lapic_resource = {
	.name = "Local APIC",
	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
};

unsigned int lapic_timer_period = 0;

static void apic_pm_activate(void);

static unsigned long apic_phys __ro_after_init;

/*
 * Get the LAPIC version
 */
static inline int lapic_get_version(void)
{
	return GET_APIC_VERSION(apic_read(APIC_LVR));
}

/*
 * Check whether the APIC is integrated or a separate chip
 */
static inline int lapic_is_integrated(void)
{
	return APIC_INTEGRATED(lapic_get_version());
}

/*
 * Check whether this is a modern or a first-generation APIC
 */
static int modern_apic(void)
{
	/* AMD systems use old APIC versions, so check the CPU */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
	    boot_cpu_data.x86 >= 0xf)
		return 1;

	/* Hygon systems use modern APIC */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		return 1;

	return lapic_get_version() >= 0x14;
}

/*
 * Right after this call the APIC becomes NOOP-driven, so apic->write()
 * and apic->read() don't do anything.
 */
static void __init apic_disable(void)
{
	pr_info("APIC: switched to apic NOOP\n");
	apic = &apic_noop;
}

void native_apic_wait_icr_idle(void)
{
	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}

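/*
 * Bounded variant of native_apic_wait_icr_idle(): poll the ICR delivery
 * status for at most 1000 * 100us (~100ms) and return the state of the
 * busy bit, so a non-zero return value means the previous IPI is still
 * pending.
 */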
u32 native_safe_apic_wait_icr_idle(void)
{
	u32 send_status;
	int timeout;

	timeout = 0;
	do {
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		if (!send_status)
			break;
		inc_irq_stat(icr_read_retry_count);
		udelay(100);
	} while (timeout++ < 1000);

	return send_status;
}

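/*
 * In xAPIC mode an ICR write is split across two registers: the
 * destination goes into APIC_ICR2 first, and the subsequent write of the
 * low word to APIC_ICR is what actually dispatches the IPI. Interrupts
 * are disabled so the two writes cannot be torn apart on this CPU.
 */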
void native_apic_icr_write(u32 low, u32 id)
{
	unsigned long flags;

	local_irq_save(flags);
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
	apic_write(APIC_ICR, low);
	local_irq_restore(flags);
}

u64 native_apic_icr_read(void)
{
	u32 icr1, icr2;

	icr2 = apic_read(APIC_ICR2);
	icr1 = apic_read(APIC_ICR);

	return icr1 | ((u64)icr2 << 32);
}

#ifdef CONFIG_X86_32
/**
 * get_physical_broadcast - Get number of physical broadcast IDs
 */
int get_physical_broadcast(void)
{
	return modern_apic() ? 0xff : 0xf;
}
#endif

/**
 * lapic_get_maxlvt - get the maximum number of local vector table entries
 */
int lapic_get_maxlvt(void)
{
	/*
	 * - we always have the APIC integrated in 64-bit mode
	 * - 82489DXs do not report # of LVT entries
	 */
	return lapic_is_integrated() ? GET_APIC_MAXLVT(apic_read(APIC_LVR)) : 2;
}

/*
 * Local APIC timer
 */

/* Clock divisor */
#define APIC_DIVISOR 16
#define TSC_DIVISOR  8

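/*
 * APIC_DIVISOR matches the divide-by-16 programmed into APIC_TDCR below,
 * so the 'clocks' argument of __setup_APIC_LVTT() is given in undivided
 * bus clocks. TSC_DIVISOR scales TSC-deadline deltas: the clockevent is
 * registered at tsc_khz * (1000 / TSC_DIVISOR) and lapic_next_deadline()
 * multiplies each delta back up by TSC_DIVISOR.
 */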
/*
 * This function sets up the local APIC timer, with a timeout of
 * 'clocks' APIC bus clock ticks. During calibration we actually call
 * this function twice on the boot CPU, once with a bogus timeout
 * value and a second time for real. The other (non-calibrating) CPUs
 * call this function only once, with the real, calibrated value.
 *
 * We do reads before writes even if unnecessary, to get around the
 * P5 APIC double write bug.
 */
static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
{
	unsigned int lvtt_value, tmp_value;

	lvtt_value = LOCAL_TIMER_VECTOR;
	if (!oneshot)
		lvtt_value |= APIC_LVT_TIMER_PERIODIC;
	else if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
		lvtt_value |= APIC_LVT_TIMER_TSCDEADLINE;

	if (!lapic_is_integrated())
		lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);

	if (!irqen)
		lvtt_value |= APIC_LVT_MASKED;

	apic_write(APIC_LVTT, lvtt_value);

	if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
		/*
		 * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
		 * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
		 * According to Intel, MFENCE can do the serialization here.
		 */
		asm volatile("mfence" : : : "memory");
		return;
	}

	/*
	 * Divide PICLK by 16
	 */
	tmp_value = apic_read(APIC_TDCR);
	apic_write(APIC_TDCR,
		   (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
		   APIC_TDR_DIV_16);

	if (!oneshot)
		apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
}

/*
 * Setup extended LVT, AMD specific
 *
 * Software should use the LVT offsets the BIOS provides. The offsets
 * are determined by the subsystems using them, such as those for MCE
 * thresholding or IBS. On K8 only offset 0 (APIC500) and MCE interrupts
 * are supported. Beginning with family 10h at least 4 offsets are
 * available.
 *
 * Since the offsets must be consistent for all cores, we keep track
 * of the LVT offsets in software and reserve the offset for the same
 * vector also to be used on other cores. An offset is freed by
 * setting the entry to APIC_EILVT_MASKED.
 *
 * If the BIOS is right, there should be no conflicts. Otherwise a
 * "[Firmware Bug]: ..." error message is generated. However, if
 * software does not properly determine the offsets, it is not
 * necessarily a BIOS bug.
 */

static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX];

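/*
 * An EILVT entry may be rewritten if the current value is masked, the new
 * value is the plain mask, or the new value matches the current one apart
 * from the mask bit.
 */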
static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
{
	return (old & APIC_EILVT_MASKED)
		|| (new == APIC_EILVT_MASKED)
		|| ((new & ~APIC_EILVT_MASKED) == old);
}

static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
{
	unsigned int rsvd, vector;

	if (offset >= APIC_EILVT_NR_MAX)
		return ~0;

	rsvd = atomic_read(&eilvt_offsets[offset]);
	do {
		vector = rsvd & ~APIC_EILVT_MASKED;	/* 0: unassigned */
		if (vector && !eilvt_entry_is_changeable(vector, new))
			/* may not change if vectors are different */
			return rsvd;
		rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
	} while (rsvd != new);

	rsvd &= ~APIC_EILVT_MASKED;
	if (rsvd && rsvd != vector)
		pr_info("LVT offset %d assigned for vector 0x%02x\n",
			offset, rsvd);

	return new;
}

/*
 * If mask=1, the LVT entry does not generate interrupts, while mask=0
 * enables the vector. See also the BKDGs. Must be called with
 * preemption disabled.
 */

int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
{
	unsigned long reg = APIC_EILVTn(offset);
	unsigned int new, old, reserved;

	new = (mask << 16) | (msg_type << 8) | vector;
	old = apic_read(reg);
	reserved = reserve_eilvt_offset(offset, new);

	if (reserved != new) {
		pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
		       "vector 0x%x, but the register is already in use for "
		       "vector 0x%x on another cpu\n",
		       smp_processor_id(), reg, offset, new, reserved);
		return -EINVAL;
	}

	if (!eilvt_entry_is_changeable(old, new)) {
		pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
		       "vector 0x%x, but the register is already in use for "
		       "vector 0x%x on this cpu\n",
		       smp_processor_id(), reg, offset, new, old);
		return -EBUSY;
	}

	apic_write(reg, new);

	return 0;
}
EXPORT_SYMBOL_GPL(setup_APIC_eilvt);
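/*
 * Minimal usage sketch (hypothetical caller, not taken from this file):
 * a subsystem owning LVT offset 1 could unmask its vector with
 *
 *	setup_APIC_eilvt(1, MY_VECTOR, APIC_EILVT_MSG_FIX, 0);
 *
 * and release the offset again by writing a masked entry:
 *
 *	setup_APIC_eilvt(1, 0, 0, 1);
 *
 * MY_VECTOR is a placeholder; real users pass their own vector and
 * message type.
 */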

/*
 * Program the next event, relative to now
 */
static int lapic_next_event(unsigned long delta,
			    struct clock_event_device *evt)
{
	apic_write(APIC_TMICT, delta);
	return 0;
}

static int lapic_next_deadline(unsigned long delta,
			       struct clock_event_device *evt)
{
	u64 tsc;

	/* This MSR is special and needs a special fence: */
	weak_wrmsr_fence();

	tsc = rdtsc();
	wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
	return 0;
}

static int lapic_timer_shutdown(struct clock_event_device *evt)
{
	unsigned int v;

	/* Lapic used as a dummy for broadcast? */
	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	v = apic_read(APIC_LVTT);
	v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
	apic_write(APIC_LVTT, v);
	apic_write(APIC_TMICT, 0);
	return 0;
}

static inline int
lapic_timer_set_periodic_oneshot(struct clock_event_device *evt, bool oneshot)
{
	/* Lapic used as a dummy for broadcast? */
	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	__setup_APIC_LVTT(lapic_timer_period, oneshot, 1);
	return 0;
}

static int lapic_timer_set_periodic(struct clock_event_device *evt)
{
	return lapic_timer_set_periodic_oneshot(evt, false);
}

static int lapic_timer_set_oneshot(struct clock_event_device *evt)
{
	return lapic_timer_set_periodic_oneshot(evt, true);
}

/*
 * Local APIC timer broadcast function
 */
static void lapic_timer_broadcast(const struct cpumask *mask)
{
#ifdef CONFIG_SMP
	apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#endif
}


/*
 * The local apic timer can be used for any function which is CPU local.
 */
static struct clock_event_device lapic_clockevent = {
	.name				= "lapic",
	.features			= CLOCK_EVT_FEAT_PERIODIC |
					  CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
					  | CLOCK_EVT_FEAT_DUMMY,
	.shift				= 32,
	.set_state_shutdown		= lapic_timer_shutdown,
	.set_state_periodic		= lapic_timer_set_periodic,
	.set_state_oneshot		= lapic_timer_set_oneshot,
	.set_state_oneshot_stopped	= lapic_timer_shutdown,
	.set_next_event			= lapic_next_event,
	.broadcast			= lapic_timer_broadcast,
	.rating				= 100,
	.irq				= -1,
};
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);

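/*
 * Models/steppings whose TSC-deadline timer is only reliable from a given
 * microcode revision onwards; driver_data holds the minimum revision that
 * apic_validate_deadline_timer() below checks against.
 */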
static const struct x86_cpu_id deadline_match[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x4, 0x4), 0x0f), /* EX */

	X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_X,	0x0b000020),

	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x2, 0x2), 0x00000011),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x3, 0x3), 0x0700000e),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x4, 0x4), 0x0f00000c),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x5, 0x5), 0x0e000003),

	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x3, 0x3), 0x01000136),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x4, 0x4), 0x02000014),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x5, 0xf), 0),

	X86_MATCH_INTEL_FAM6_MODEL( HASWELL,		0x22),
	X86_MATCH_INTEL_FAM6_MODEL( HASWELL_L,		0x20),
	X86_MATCH_INTEL_FAM6_MODEL( HASWELL_G,		0x17),

	X86_MATCH_INTEL_FAM6_MODEL( BROADWELL,		0x25),
	X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_G,	0x17),

	X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE_L,		0xb2),
	X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE,		0xb2),

	X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE_L,		0x52),
	X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE,		0x52),

	{},
};

static __init bool apic_validate_deadline_timer(void)
{
	const struct x86_cpu_id *m;
	u32 rev;

	if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
		return false;
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return true;

	m = x86_match_cpu(deadline_match);
	if (!m)
		return true;

	rev = (u32)m->driver_data;

	if (boot_cpu_data.microcode >= rev)
		return true;

	setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
	pr_err(FW_BUG "TSC_DEADLINE disabled due to Errata; "
	       "please update microcode to version: 0x%x (or later)\n", rev);
	return false;
}

/*
 * Set up the local APIC timer for this CPU. Copy the initialized values
 * from the boot CPU and register the clock event in the framework.
 */
static void setup_APIC_timer(void)
{
	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);

	if (this_cpu_has(X86_FEATURE_ARAT)) {
		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
		/* Make LAPIC timer preferable over percpu HPET */
		lapic_clockevent.rating = 150;
	}

	memcpy(levt, &lapic_clockevent, sizeof(*levt));
	levt->cpumask = cpumask_of(smp_processor_id());

	if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
		levt->name = "lapic-deadline";
		levt->features &= ~(CLOCK_EVT_FEAT_PERIODIC |
				    CLOCK_EVT_FEAT_DUMMY);
		levt->set_next_event = lapic_next_deadline;
		clockevents_config_and_register(levt,
						tsc_khz * (1000 / TSC_DIVISOR),
						0xF, ~0UL);
	} else
		clockevents_register_device(levt);
}

/*
 * Install the updated TSC frequency from recalibration into the TSC
 * deadline clockevent devices.
 */
static void __lapic_update_tsc_freq(void *info)
{
	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);

	if (!this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
		return;

	clockevents_update_freq(levt, tsc_khz * (1000 / TSC_DIVISOR));
}

void lapic_update_tsc_freq(void)
{
	/*
	 * The clockevent device's ->mult and ->shift can both be
	 * changed. In order to avoid races, schedule the frequency
	 * update code on each CPU.
	 */
	on_each_cpu(__lapic_update_tsc_freq, NULL, 0);
}

/*
 * In this function we calibrate the APIC bus clock against an external
 * timer.
 *
 * We want to do the calibration only once since we want to have local timer
 * irqs synchronous. CPUs connected by the same APIC bus have the very same
 * bus frequency.
 *
 * This was previously done by reading the PIT/HPET and waiting for a wrap
 * around to find out that a tick has elapsed. I have a box where the PIT
 * readout is broken, so it never gets out of the wait loop again. This was
 * also reported by others.
 *
 * Monitoring the jiffies value is inaccurate and the clockevents
 * infrastructure allows us to do a simple substitution of the interrupt
 * handler.
 *
 * The calibration routine also uses the pm_timer when possible, as the PIT
 * happens to run way too slow (factor 2.3 on my VAIO CoreDuo, which goes
 * back to normal later in the boot process).
 */

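/* Number of clockevent ticks sampled for calibration: HZ/10 ticks ~ 100ms */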
#define LAPIC_CAL_LOOPS		(HZ/10)

static __initdata int lapic_cal_loops = -1;
static __initdata long lapic_cal_t1, lapic_cal_t2;
static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;

/*
 * Temporary interrupt handler and polled calibration function.
 */
static void __init lapic_cal_handler(struct clock_event_device *dev)
{
	unsigned long long tsc = 0;
	long tapic = apic_read(APIC_TMCCT);
	unsigned long pm = acpi_pm_read_early();

	if (boot_cpu_has(X86_FEATURE_TSC))
		tsc = rdtsc();

	switch (lapic_cal_loops++) {
	case 0:
		lapic_cal_t1 = tapic;
		lapic_cal_tsc1 = tsc;
		lapic_cal_pm1 = pm;
		lapic_cal_j1 = jiffies;
		break;

	case LAPIC_CAL_LOOPS:
		lapic_cal_t2 = tapic;
		lapic_cal_tsc2 = tsc;
		if (pm < lapic_cal_pm1)
			pm += ACPI_PM_OVRRUN;
		lapic_cal_pm2 = pm;
		lapic_cal_j2 = jiffies;
		break;
	}
}

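/*
 * Cross-check the measured calibration interval against the ACPI PM timer:
 * deltapm should be about pm_100ms (PMTMR_TICKS_PER_SEC / 10 ticks of the
 * 3.579545 MHz PM timer) within 1%. If it is off, rescale the APIC and TSC
 * deltas so that they correspond to a true 100ms interval.
 */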
static int __init
calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
{
	const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
	const long pm_thresh = pm_100ms / 100;
	unsigned long mult;
	u64 res;

#ifndef CONFIG_X86_PM_TIMER
	return -1;
#endif

	apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm);

	/* Check if the PM timer is available */
	if (!deltapm)
		return -1;

	mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);

	if (deltapm > (pm_100ms - pm_thresh) &&
	    deltapm < (pm_100ms + pm_thresh)) {
		apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n");
		return 0;
	}

	res = (((u64)deltapm) * mult) >> 22;
	do_div(res, 1000000);
	pr_warn("APIC calibration not consistent "
		"with PM-Timer: %ldms instead of 100ms\n", (long)res);

	/* Correct the lapic counter value */
	res = (((u64)(*delta)) * pm_100ms);
	do_div(res, deltapm);
	pr_info("APIC delta adjusted to PM-Timer: "
		"%lu (%ld)\n", (unsigned long)res, *delta);
	*delta = (long)res;

	/* Correct the tsc counter value */
	if (boot_cpu_has(X86_FEATURE_TSC)) {
		res = (((u64)(*deltatsc)) * pm_100ms);
		do_div(res, deltapm);
		apic_printk(APIC_VERBOSE, "TSC delta adjusted to "
			    "PM-Timer: %lu (%ld)\n",
			    (unsigned long)res, *deltatsc);
		*deltatsc = (long)res;
	}

	return 0;
}

static int __init lapic_init_clockevent(void)
{
	if (!lapic_timer_period)
		return -1;

	/* Calculate the scaled math multiplication factor */
	lapic_clockevent.mult = div_sc(lapic_timer_period/APIC_DIVISOR,
				       TICK_NSEC, lapic_clockevent.shift);
	lapic_clockevent.max_delta_ns =
		clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
	lapic_clockevent.max_delta_ticks = 0x7FFFFFFF;
	lapic_clockevent.min_delta_ns =
		clockevent_delta2ns(0xF, &lapic_clockevent);
	lapic_clockevent.min_delta_ticks = 0xF;

	return 0;
}

bool __init apic_needs_pit(void)
{
	/*
	 * If the frequencies are not known, the PIT is required for both TSC
	 * and APIC timer calibration.
	 */
	if (!tsc_khz || !cpu_khz)
		return true;

	/* Is there an APIC at all or is it disabled? */
	if (!boot_cpu_has(X86_FEATURE_APIC) || disable_apic)
		return true;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * If interrupt delivery mode is legacy PIC or virtual wire without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * configuration, the local APIC timer won't be set up. Make sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * that the PIT is initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (apic_intr_mode == APIC_PIC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) apic_intr_mode == APIC_VIRTUAL_WIRE_NO_CONFIG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /* Virt guests may lack ARAT, but still have DEADLINE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (!boot_cpu_has(X86_FEATURE_ARAT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) /* Deadline timer is based on TSC so no further PIT action required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) /* APIC timer disabled? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (disable_apic_timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * The APIC timer frequency is known already, no PIT calibration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * required. If unknown, let the PIT be initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) return lapic_timer_period == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) static int __init calibrate_APIC_clock(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) u64 tsc_perj = 0, tsc_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) unsigned long jif_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) unsigned long deltaj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) long delta, deltatsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) int pm_referenced = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * Check if the lapic timer has already been calibrated by a platform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * specific routine, such as the TSC calibration code. If so, just fill
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * in the clockevent structure and return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (!lapic_init_clockevent()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) lapic_timer_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * Direct calibration methods must have an always running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * local APIC timer, no need for broadcast timer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) "calibrating APIC timer ...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * There are platforms w/o global clockevent devices. Instead of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * making the calibration conditional on that, use a polling based
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * approach everywhere.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * Set up the APIC counter to maximum. There is no way the lapic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * can underflow in the 100ms detection time frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) __setup_APIC_LVTT(0xffffffff, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * Methods to terminate the calibration loop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * 1) Global clockevent if available (jiffies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * 2) TSC if available and frequency is known
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) jif_start = READ_ONCE(jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
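/*
 * If the TSC frequency is known, snapshot the TSC and precompute the
 * number of TSC cycles per jiffy so that tick boundaries can be
 * detected by polling the TSC in the loop below.
 */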
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (tsc_khz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) tsc_start = rdtsc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * Enable interrupts so the tick can fire, if a global
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * clockevent device is available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) /* Wait for a tick to elapse */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (tsc_khz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) u64 tsc_now = rdtsc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if ((tsc_now - tsc_start) >= tsc_perj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) tsc_start += tsc_perj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) unsigned long jif_now = READ_ONCE(jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (time_after(jif_now, jif_start)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) jif_start = jif_now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) /* Invoke the calibration routine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) lapic_cal_handler(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /* Build delta t1-t2 as apic timer counts down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) delta = lapic_cal_t1 - lapic_cal_t2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /* We trust the PM-timer based calibration if possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) &delta, &deltatsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
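/*
 * delta is the APIC counter decrement (at the divided clock) accumulated
 * over LAPIC_CAL_LOOPS jiffies. Scaling by APIC_DIVISOR yields the bus
 * clocks per jiffy, which the clockevent conversion below is based on.
 */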
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) lapic_timer_period = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) lapic_init_clockevent();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) lapic_timer_period);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (boot_cpu_has(X86_FEATURE_TSC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) apic_printk(APIC_VERBOSE, "..... CPU clock speed is %ld.%04ld MHz.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) apic_printk(APIC_VERBOSE, "..... host bus clock speed is %u.%04u MHz.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) lapic_timer_period / (1000000 / HZ),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) lapic_timer_period % (1000000 / HZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * Do a sanity check on the APIC calibration result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (lapic_timer_period < (1000000 / HZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) pr_warn("APIC frequency too slow, disabling apic timer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
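/*
 * The calibration result passed the sanity check, so the local APIC timer
 * can be used as a real clock event device. The DUMMY flag may be set
 * again below if the jiffies based verification fails.
 */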
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * PM timer calibration failed or was not turned on, so let's try APIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * timer based calibration, if a global clockevent device is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (!pm_referenced && global_clock_event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * Set up the APIC timer manually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) levt->event_handler = lapic_cal_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) lapic_timer_set_periodic(levt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) lapic_cal_loops = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) /* Let the interrupts run */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) /* Stop the lapic timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) lapic_timer_shutdown(levt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) /* Jiffies delta */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) deltaj = lapic_cal_j2 - lapic_cal_j1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) /* Check if the jiffies result is consistent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) levt->features |= CLOCK_EVT_FEAT_DUMMY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) pr_warn("APIC timer disabled due to verification failure\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * Setup the boot APIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * Calibrate and verify the result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) void __init setup_boot_APIC_clock(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * The local APIC timer can be disabled via the kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * command line or from the CPU detection code. Register the lapic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * timer as a dummy clock event source on SMP systems, so the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * broadcast mechanism is used. On UP systems simply ignore it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (disable_apic_timer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) pr_info("Disabling APIC timer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) /* No broadcast on UP! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (num_possible_cpus() > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) lapic_clockevent.mult = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) setup_APIC_timer();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (calibrate_APIC_clock()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) /* No broadcast on UP! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (num_possible_cpus() > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) setup_APIC_timer();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * If nmi_watchdog is set to IO_APIC, we need the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * PIT/HPET going. Otherwise register lapic as a dummy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /* Setup the lapic or request the broadcast */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) setup_APIC_timer();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) amd_e400_c1e_apic_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
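/*
 * Set up the local APIC timer on a secondary CPU. The timer was already
 * calibrated on the boot CPU, so only the per-CPU setup and the AMD E400
 * C1E handling are needed here.
 */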
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) void setup_secondary_APIC_clock(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) setup_APIC_timer();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) amd_e400_c1e_apic_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * The guts of the apic timer interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static void local_apic_timer_interrupt(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) struct clock_event_device *evt = this_cpu_ptr(&lapic_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * Normally we should not be here till the LAPIC has been initialized, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * in some cases like kdump, it's possible that a pending LAPIC timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * interrupt from the previous kernel's context is delivered in the new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * kernel the moment interrupts are enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * Interrupts are enabled early and the LAPIC is set up much later, hence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * it's possible that when we get here evt->event_handler is NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * Check for event_handler being NULL and discard the interrupt as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * spurious.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (!evt->event_handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) pr_warn("Spurious LAPIC timer interrupt on cpu %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) /* Switch it off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) lapic_timer_shutdown(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) * The NMI deadlock-detector uses this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) inc_irq_stat(apic_timer_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) evt->event_handler(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * Local APIC timer interrupt. This is the most natural way for doing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * local interrupts, but local timer interrupts can be emulated by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) * broadcast interrupts too. [in case the hw doesn't support APIC timers]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * [ if a single-CPU system runs an SMP kernel then we call the local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * interrupt as well. Thus we cannot inline the local irq ... ]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) DEFINE_IDTENTRY_SYSVEC(sysvec_apic_timer_interrupt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) struct pt_regs *old_regs = set_irq_regs(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) ack_APIC_irq();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) trace_local_timer_entry(LOCAL_TIMER_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) local_apic_timer_interrupt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) trace_local_timer_exit(LOCAL_TIMER_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) set_irq_regs(old_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
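/*
 * Changing the profiling timer multiplier is not supported, so reject
 * the request.
 */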
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) int setup_profiling_timer(unsigned int multiplier)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * Local APIC start and shutdown
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * clear_local_APIC - shutdown the local APIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * This is called when a CPU is disabled and before rebooting, so that the state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * of the local APIC has no dangling leftovers. Also used to clean out any BIOS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * leftovers during boot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) void clear_local_APIC(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) int maxlvt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) u32 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /* APIC hasn't been mapped yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (!x2apic_mode && !apic_phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) maxlvt = lapic_get_maxlvt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * Masking an LVT entry can trigger a local APIC error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) * if the vector is zero. Mask LVTERR first to prevent this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (maxlvt >= 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * Careful: we have to set only the mask bits first (leaving the rest of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * each LVT entry intact) to deassert any level-triggered sources.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) v = apic_read(APIC_LVTT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) v = apic_read(APIC_LVT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) v = apic_read(APIC_LVT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (maxlvt >= 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) v = apic_read(APIC_LVTPC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /* Let's not touch this if we didn't frob it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) #ifdef CONFIG_X86_THERMAL_VECTOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if (maxlvt >= 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) v = apic_read(APIC_LVTTHMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) #ifdef CONFIG_X86_MCE_INTEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (maxlvt >= 6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) v = apic_read(APIC_LVTCMCI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (!(v & APIC_LVT_MASKED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * Clean APIC state for other OSs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) apic_write(APIC_LVTT, APIC_LVT_MASKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) apic_write(APIC_LVT0, APIC_LVT_MASKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) apic_write(APIC_LVT1, APIC_LVT_MASKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (maxlvt >= 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) apic_write(APIC_LVTERR, APIC_LVT_MASKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (maxlvt >= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) apic_write(APIC_LVTPC, APIC_LVT_MASKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /* Integrated APIC (!82489DX)? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (lapic_is_integrated()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (maxlvt > 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) /* Clear ESR due to Pentium errata 3AP and 11AP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) apic_write(APIC_ESR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) apic_read(APIC_ESR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) * apic_soft_disable - Clears and software disables the local APIC on hotplug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) * Contrary to disable_local_APIC() this does not touch the enable bit in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * MSR_IA32_APICBASE. Clearing that bit on systems based on the 3 wire APIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) * bus would require a hardware reset as the APIC would lose track of bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * arbitration. On systems with FSB delivery APICBASE could be disabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * but it has to be guaranteed that no interrupt is sent to the APIC while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * in that state and it's not clear from the SDM whether it still responds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * to INIT/SIPI messages. Stay on the safe side and use software disable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) void apic_soft_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) clear_local_APIC();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) /* Soft disable APIC (implies clearing of registers for 82489DX!). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) value = apic_read(APIC_SPIV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) value &= ~APIC_SPIV_APIC_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) apic_write(APIC_SPIV, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * disable_local_APIC - clear and disable the local APIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) void disable_local_APIC(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /* APIC hasn't been mapped yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (!x2apic_mode && !apic_phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) apic_soft_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * When LAPIC was disabled by the BIOS and enabled by the kernel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) * restore the disabled state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (enabled_via_apicbase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) unsigned int l, h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) rdmsr(MSR_IA32_APICBASE, l, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) l &= ~MSR_IA32_APICBASE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) wrmsr(MSR_IA32_APICBASE, l, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * If Linux enabled the LAPIC against the BIOS default, shut it down before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * re-entering the BIOS on shutdown. Otherwise the BIOS may get confused and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * not power off. Additionally, clear all LVT entries before disable_local_APIC()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * for the case where Linux didn't enable the LAPIC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) void lapic_shutdown(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (!enabled_via_apicbase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) clear_local_APIC();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) disable_local_APIC();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) * sync_Arb_IDs - synchronize APIC bus arbitration IDs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) void __init sync_Arb_IDs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1. Not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) * needed on AMD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * Wait for idle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) apic_wait_icr_idle();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) apic_write(APIC_ICR, APIC_DEST_ALLINC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) APIC_INT_LEVELTRIG | APIC_DM_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) enum apic_intr_mode_id apic_intr_mode __ro_after_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
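/*
 * Determine the interrupt delivery mode for the BSP from the command
 * line, the APIC feature/BIOS state and the MP table or ACPI MADT
 * configuration.
 */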
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) static int __init __apic_intr_mode_select(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) /* Check kernel option */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) if (disable_apic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) pr_info("APIC disabled via kernel command line\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) return APIC_PIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) /* Check BIOS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) /* On 64-bit, the APIC must be integrated, so check the local APIC only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (!boot_cpu_has(X86_FEATURE_APIC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) disable_apic = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) pr_info("APIC disabled by BIOS\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) return APIC_PIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) /* On 32-bit, the APIC may be an integrated APIC or an 82489DX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /* Neither 82489DX nor integrated APIC? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (!boot_cpu_has(X86_FEATURE_APIC) && !smp_found_config) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) disable_apic = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) return APIC_PIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /* Does the BIOS claim an integrated APIC even though the CPU does not report one? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if (!boot_cpu_has(X86_FEATURE_APIC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) APIC_INTEGRATED(boot_cpu_apic_version)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) disable_apic = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) pr_err(FW_BUG "Local APIC %d not detected, force emulation\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) boot_cpu_physical_apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) return APIC_PIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) /* Check MP table or ACPI MADT configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (!smp_found_config) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) disable_ioapic_support();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (!acpi_lapic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) pr_info("APIC: ACPI MADT or MP tables are not detected\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) return APIC_VIRTUAL_WIRE_NO_CONFIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) return APIC_VIRTUAL_WIRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) /* If SMP should be disabled, then really disable it! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (!setup_max_cpus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) pr_info("APIC: SMP mode deactivated\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) return APIC_SYMMETRIC_IO_NO_ROUTING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) if (read_apic_id() != boot_cpu_physical_apicid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) read_apic_id(), boot_cpu_physical_apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) /* Or can we switch back to PIC here? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) return APIC_SYMMETRIC_IO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) /* Select the interrupt delivery mode for the BSP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) void __init apic_intr_mode_select(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) apic_intr_mode = __apic_intr_mode_select();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) * An initial setup of the virtual wire mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) void __init init_bsp_APIC(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) unsigned int value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * Don't do the setup now if we have an SMP BIOS as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * through-I/O-APIC virtual wire mode might be active.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (smp_found_config || !boot_cpu_has(X86_FEATURE_APIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) * Do not trust the local APIC being empty at bootup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) clear_local_APIC();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * Enable APIC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) value = apic_read(APIC_SPIV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) value &= ~APIC_VECTOR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) value |= APIC_SPIV_APIC_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) /* This bit is reserved on P4/Xeon and should be cleared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) (boot_cpu_data.x86 == 15))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) value &= ~APIC_SPIV_FOCUS_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) value |= APIC_SPIV_FOCUS_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) value |= SPURIOUS_APIC_VECTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) apic_write(APIC_SPIV, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) * Set up the virtual wire mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) apic_write(APIC_LVT0, APIC_DM_EXTINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) value = APIC_DM_NMI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (!lapic_is_integrated()) /* 82489DX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) value |= APIC_LVT_LEVEL_TRIGGER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (apic_extnmi == APIC_EXTNMI_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) value |= APIC_LVT_MASKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) apic_write(APIC_LVT1, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) static void __init apic_bsp_setup(bool upmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) /* Init the interrupt delivery mode for the BSP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) void __init apic_intr_mode_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) bool upmode = IS_ENABLED(CONFIG_UP_LATE_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) switch (apic_intr_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) case APIC_PIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) pr_info("APIC: Keep in PIC mode(8259)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) case APIC_VIRTUAL_WIRE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) pr_info("APIC: Switch to virtual wire mode setup\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) default_setup_apic_routing();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) case APIC_VIRTUAL_WIRE_NO_CONFIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) pr_info("APIC: Switch to virtual wire mode setup with no configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) upmode = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) default_setup_apic_routing();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) case APIC_SYMMETRIC_IO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) pr_info("APIC: Switch to symmetric I/O mode setup\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) default_setup_apic_routing();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) case APIC_SYMMETRIC_IO_NO_ROUTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) pr_info("APIC: Switch to symmetric I/O mode setup in no SMP routine\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (x86_platform.apic_post_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) x86_platform.apic_post_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) apic_bsp_setup(upmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
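/*
 * Set up the Error Status Register: point LVTERR at the error vector and
 * report the ESR value observed before enabling it. Skipped on 82489DX
 * APICs (no ESR) and when the APIC driver requests the ESR to stay
 * disabled.
 */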
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) static void lapic_setup_esr(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) unsigned int oldvalue, value, maxlvt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) if (!lapic_is_integrated()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) pr_info("No ESR for 82489DX.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (apic->disable_esr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) * Something untraceable is creating bad interrupts on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) * secondary quads ... for the moment, just leave the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) * ESR disabled - we can't do anything useful with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) * errors anyway - mbligh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) pr_info("Leaving ESR disabled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) maxlvt = lapic_get_maxlvt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) apic_write(APIC_ESR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) oldvalue = apic_read(APIC_ESR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) /* enables sending errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) value = ERROR_APIC_VECTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) apic_write(APIC_LVTERR, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) * The spec says to clear errors after enabling the vector.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (maxlvt > 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) apic_write(APIC_ESR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) value = apic_read(APIC_ESR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (value != oldvalue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) apic_printk(APIC_VERBOSE, "ESR value before enabling vector: 0x%08x after: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) oldvalue, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
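/*
 * IRR and ISR each consist of APIC_ISR_NR 32-bit registers. The union
 * below allows reading them register by register and scanning the result
 * as a single bitmap.
 */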
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) #define APIC_IR_REGS APIC_ISR_NR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) #define APIC_IR_BITS (APIC_IR_REGS * 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) #define APIC_IR_MAPSIZE (APIC_IR_BITS / BITS_PER_LONG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) union apic_ir {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) unsigned long map[APIC_IR_MAPSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) u32 regs[APIC_IR_REGS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
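/*
 * Snapshot the IRR and ISR registers. If any ISR bit is set, ACK each of
 * them and return true so the caller runs another round. Otherwise return
 * true only if pending IRR bits remain.
 */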
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) int i, bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) /* Read the IRRs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) for (i = 0; i < APIC_IR_REGS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) irr->regs[i] = apic_read(APIC_IRR + i * 0x10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) /* Read the ISRs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) for (i = 0; i < APIC_IR_REGS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) isr->regs[i] = apic_read(APIC_ISR + i * 0x10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) * If the ISR map is not empty, ACK the APIC and run another round
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) * to verify whether a pending IRR has been unblocked and turned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * into an ISR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (!bitmap_empty(isr->map, APIC_IR_BITS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * There can be multiple ISR bits set when a high priority
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) * interrupt preempted a lower priority one. Issue an ACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) * per set bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) for_each_set_bit(bit, isr->map, APIC_IR_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) ack_APIC_irq();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) return !bitmap_empty(irr->map, APIC_IR_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * After a crash, we no longer service the interrupts and a pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) * interrupt from the previous kernel might still have its ISR bit set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) * Most probably by now the CPU has serviced that pending interrupt and it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * might not have done the ack_APIC_irq() because it thought the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) * came from the i8259 as ExtInt. The LAPIC did not get an EOI so it does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) * clear the ISR bit and the CPU thinks it has already serviced the interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) * Hence a vector might get locked. It was noticed for the timer irq (vector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) * 0x31). Issue an extra EOI to clear the ISR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) * If there are pending IRR bits they turn into ISR bits after a higher
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) * priority ISR bit has been acked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) static void apic_pending_intr_clear(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) union apic_ir irr, isr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) /* 512 loops are way oversized and give the APIC a chance to obey. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) for (i = 0; i < 512; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (!apic_check_and_ack(&irr, &isr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) /* Dump the IRR/ISR content if that failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) pr_warn("APIC: Stale IRR: %256pb ISR: %256pb\n", irr.map, isr.map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) * setup_local_APIC - setup the local APIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * Used to set up the local APIC while initializing the BSP or bringing up APs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) * Always called with preemption disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) static void setup_local_APIC(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) unsigned int value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (disable_apic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) disable_ioapic_support();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) * If this comes from kexec/kcrash the APIC might be enabled in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) * SPIV. Soft disable it before doing further initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) value = apic_read(APIC_SPIV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) value &= ~APIC_SPIV_APIC_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) apic_write(APIC_SPIV, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) /* Pound the ESR really hard over the head with a big hammer - mbligh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (lapic_is_integrated() && apic->disable_esr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) apic_write(APIC_ESR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) apic_write(APIC_ESR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) apic_write(APIC_ESR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) apic_write(APIC_ESR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) * Double-check whether this APIC is really registered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) * This is meaningless in clustered apic mode, so we skip it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) BUG_ON(!apic->apic_id_registered());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) * Intel recommends to set DFR, LDR and TPR before enabling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) * document number 292116). So here it goes...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) apic->init_apic_ldr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (apic->dest_logical) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) int logical_apicid, ldr_apicid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) * APIC LDR is initialized. If logical_apicid mapping was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) * initialized during get_smp_config(), make sure it matches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) * the actual value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (logical_apicid != BAD_APICID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) WARN_ON(logical_apicid != ldr_apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) /* Always use the value from LDR. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) * Set Task Priority to 'accept all except vectors 0-31'. An APIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) * vector in the 16-31 range could be delivered if TPR == 0, but it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) * would be mistaken for an exception and terrible things would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) * happen. We never change this later on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) value = apic_read(APIC_TASKPRI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) value &= ~APIC_TPRI_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) value |= 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) apic_write(APIC_TASKPRI, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) /* Clear any potentially stale ISR/IRR bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) apic_pending_intr_clear();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) * Now that we are all set up, enable the APIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) value = apic_read(APIC_SPIV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) value &= ~APIC_VECTOR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) * Enable APIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) value |= APIC_SPIV_APIC_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) * Some unknown Intel IO/APIC (or APIC) erratum is biting us with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) * certain networking cards. If high-frequency interrupts are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) * happening on a particular IOAPIC pin and the IOAPIC routing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) * entry is also masked/unmasked at a high rate, then sooner or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) * later the IOAPIC line gets 'stuck' and no more interrupts are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) * received from the device. If the focus CPU check is disabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) * the hang goes away, oh well :-(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) * [ This bug can be reproduced easily with level-triggered PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) *   NE2000 networking cards and PII/PIII processors on dual BX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) *   chipset boards. ]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) * Actually, disabling the focus CPU check just makes the hang less
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) * frequent, as it makes the interrupt distribution model more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) * like LRU than MRU (the short-term load is more even across CPUs).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) * - enable the focus processor (bit == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) * - 64-bit mode always uses processor focus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) *   so there is no need to set it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) value &= ~APIC_SPIV_FOCUS_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) * Set spurious IRQ vector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) value |= SPURIOUS_APIC_VECTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) apic_write(APIC_SPIV, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) perf_events_lapic_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) * Set up LVT0, LVT1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) * set up through-local-APIC on the boot CPU's LINT0. This is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) * strictly necessary in pure symmetric-IO mode, but sometimes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) * we delegate interrupts to the 8259A.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) * TODO: set up through-local-APIC from through-I/O-APIC? --macro
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (!cpu && (pic_mode || !value || skip_ioapic_setup)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) value = APIC_DM_EXTINT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) value = APIC_DM_EXTINT | APIC_LVT_MASKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) apic_write(APIC_LVT0, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) * Only the BSP sees the LINT1 NMI signal by default. This can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) * modified by apic_extnmi= boot option.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) if ((!cpu && apic_extnmi != APIC_EXTNMI_NONE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) apic_extnmi == APIC_EXTNMI_ALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) value = APIC_DM_NMI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) value = APIC_DM_NMI | APIC_LVT_MASKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) /* Is this an 82489DX? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (!lapic_is_integrated())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) value |= APIC_LVT_LEVEL_TRIGGER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) apic_write(APIC_LVT1, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) #ifdef CONFIG_X86_MCE_INTEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) /* Recheck CMCI information after local APIC is up on CPU #0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (!cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) cmci_recheck();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
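
/*
 * Editor's note: the sketch below is illustrative only and not part of
 * this file.  It is a simplified model of why the TPR value programmed
 * in setup_local_APIC() above (0x10) means "accept all except vectors
 * 0-31": a vector is only delivered if its priority class (vector >> 4)
 * is greater than TPR[7:4].  The in-service priority, which also feeds
 * into the processor priority, is ignored here for brevity.
 */
static inline bool example_tpr_accepts_vector(unsigned int tpr, unsigned int vector)
{
	unsigned int tpr_class = (tpr >> 4) & 0xf;
	unsigned int vec_class = (vector >> 4) & 0xf;

	/* With tpr == 0x10: vectors 0x00-0x1f blocked, 0x20-0xff delivered */
	return vec_class > tpr_class;
}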
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) static void end_local_APIC_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) lapic_setup_esr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) unsigned int value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) /* Disable the local apic timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) value = apic_read(APIC_LVTT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) apic_write(APIC_LVTT, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) apic_pm_activate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * APIC setup function for application processors. Called from smpboot.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) void apic_ap_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) setup_local_APIC();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) end_local_APIC_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) #ifdef CONFIG_X86_X2APIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) int x2apic_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) X2APIC_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) X2APIC_ON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) X2APIC_DISABLED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) static int x2apic_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) static void __x2apic_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) u64 msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) if (!boot_cpu_has(X86_FEATURE_APIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) rdmsrl(MSR_IA32_APICBASE, msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if (!(msr & X2APIC_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) /* Disable xapic and x2apic first and then reenable xapic mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) wrmsrl(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) printk_once(KERN_INFO "x2apic disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) }
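
/*
 * Editor's note: illustrative sketch, not part of this file.  The two
 * wrmsrl() calls in __x2apic_disable() exist because a direct
 * x2APIC -> xAPIC transition is not allowed; the APIC has to pass
 * through the globally disabled state first.  Assuming the usual
 * IA32_APICBASE layout (bit 11 = xAPIC global enable, bit 10 = x2APIC
 * enable), the two intermediate MSR values look like this:
 */
static inline void example_x2apic_disable_steps(u64 apicbase, u64 steps[2])
{
	const u64 xapic_en  = 1ULL << 11;	/* IA32_APICBASE.EN   */
	const u64 x2apic_en = 1ULL << 10;	/* IA32_APICBASE.EXTD */

	steps[0] = apicbase & ~(xapic_en | x2apic_en);	/* fully disabled */
	steps[1] = apicbase & ~x2apic_en;		/* back to xAPIC  */
}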
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) static void __x2apic_enable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) u64 msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) rdmsrl(MSR_IA32_APICBASE, msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) if (msr & X2APIC_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) printk_once(KERN_INFO "x2apic enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) static int __init setup_nox2apic(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (x2apic_enabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) int apicid = native_apic_msr_read(APIC_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) if (apicid >= 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) pr_warn("Apicid: %08x, cannot enforce nox2apic\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) pr_warn("x2apic already enabled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) __x2apic_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) setup_clear_cpu_cap(X86_FEATURE_X2APIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) x2apic_state = X2APIC_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) x2apic_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) early_param("nox2apic", setup_nox2apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) /* Called from cpu_init() to enable x2apic on (secondary) cpus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) void x2apic_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) * If x2apic is not in the ON state, disable it if it was already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) * enabled by the BIOS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (x2apic_state != X2APIC_ON) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) __x2apic_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) __x2apic_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) static __init void x2apic_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) u32 x2apic_id, state = x2apic_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) x2apic_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) x2apic_state = X2APIC_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (state != X2APIC_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) x2apic_id = read_apic_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (x2apic_id >= 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) panic("Cannot disable x2apic, id: %08x\n", x2apic_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) __x2apic_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) register_lapic_address(mp_lapic_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) static __init void x2apic_enable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) if (x2apic_state != X2APIC_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) x2apic_mode = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) x2apic_state = X2APIC_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) __x2apic_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) static __init void try_to_enable_x2apic(int remap_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) if (x2apic_state == X2APIC_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (remap_mode != IRQ_REMAP_X2APIC_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) * Using X2APIC without IR is not architecturally supported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) * on bare metal but may be supported in guests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) if (!x86_init.hyper.x2apic_available()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) x2apic_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) * Without IR, all CPUs can be addressed by IOAPIC/MSI only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) * in physical mode, and CPUs with an APIC ID that cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) * be addressed must not be brought online.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) x2apic_set_max_apicid(255);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) x2apic_phys = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) x2apic_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) void __init check_x2apic(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) if (x2apic_enabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) pr_info("x2apic: enabled by BIOS, switching to x2apic ops\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) x2apic_mode = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) x2apic_state = X2APIC_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) } else if (!boot_cpu_has(X86_FEATURE_X2APIC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) x2apic_state = X2APIC_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) #else /* CONFIG_X86_X2APIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) static int __init validate_x2apic(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (!apic_is_x2apic_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) * Checkme: Can we simply turn off x2apic here instead of panicking?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) panic("BIOS has enabled x2apic but kernel doesn't support x2apic, please disable x2apic in BIOS.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) early_initcall(validate_x2apic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) static inline void try_to_enable_x2apic(int remap_mode) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) static inline void __x2apic_enable(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) #endif /* !CONFIG_X86_X2APIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) void __init enable_IR_x2apic(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) int ret, ir_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) if (skip_ioapic_setup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) ir_stat = irq_remapping_prepare();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) if (ir_stat < 0 && !x2apic_supported())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) ret = save_ioapic_entries();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) pr_info("Saving IO-APIC state failed: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) legacy_pic->mask_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) mask_ioapic_entries();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) /* If irq_remapping_prepare() succeeded, try to enable it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) if (ir_stat >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) ir_stat = irq_remapping_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) /* ir_stat contains the remap mode or an error code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) try_to_enable_x2apic(ir_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) if (ir_stat < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) restore_ioapic_entries();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) legacy_pic->restore_mask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) * Detect and enable local APICs on non-SMP boards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) * Original code written by Keir Fraser.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) * On AMD64 we trust the BIOS: if it says there is no APIC, it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) * likely not correctly set up (usually the APIC timer won't work, etc.).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) static int __init detect_init_APIC(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) if (!boot_cpu_has(X86_FEATURE_APIC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) pr_info("No local APIC present\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) static int __init apic_verify(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) u32 features, h, l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) * The APIC feature bit should now be enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) * in `cpuid'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) features = cpuid_edx(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) if (!(features & (1 << X86_FEATURE_APIC))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) pr_warn("Could not enable APIC!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) /* The BIOS may have set up the APIC at some other address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) if (boot_cpu_data.x86 >= 6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) rdmsr(MSR_IA32_APICBASE, l, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) if (l & MSR_IA32_APICBASE_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) pr_info("Found and enabled local APIC!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
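
/*
 * Editor's note: illustrative sketch, not part of this file.  apic_verify()
 * above splits IA32_APICBASE into l (low 32 bits) and h.  Assuming the
 * usual layout (bit 11 = MSR_IA32_APICBASE_ENABLE, bits 12-31 = base
 * address), the interesting fields of the low word are:
 */
static inline unsigned long example_apicbase_fields(u32 l, int *enabled)
{
	*enabled = !!(l & (1U << 11));	/* MSR_IA32_APICBASE_ENABLE */
	return l & 0xfffff000U;		/* MSR_IA32_APICBASE_BASE   */
}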
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) int __init apic_force_enable(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) u32 h, l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) if (disable_apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) * Some BIOSes disable the local APIC in the APIC_BASE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) * MSR. This can only be done in software for Intel P6 or later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) * and AMD K7 (Model > 1) or later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) if (boot_cpu_data.x86 >= 6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) rdmsr(MSR_IA32_APICBASE, l, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if (!(l & MSR_IA32_APICBASE_ENABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) pr_info("Local APIC disabled by BIOS -- reenabling.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) l &= ~MSR_IA32_APICBASE_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) l |= MSR_IA32_APICBASE_ENABLE | addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) wrmsr(MSR_IA32_APICBASE, l, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) enabled_via_apicbase = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) return apic_verify();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) * Detect and initialize APIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) static int __init detect_init_APIC(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) /* Disabled by kernel option? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (disable_apic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) switch (boot_cpu_data.x86_vendor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) case X86_VENDOR_AMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) (boot_cpu_data.x86 >= 15))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) goto no_apic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) case X86_VENDOR_HYGON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) case X86_VENDOR_INTEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) (boot_cpu_data.x86 == 5 && boot_cpu_has(X86_FEATURE_APIC)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) goto no_apic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) goto no_apic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) if (!boot_cpu_has(X86_FEATURE_APIC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) * Override the BIOS and try to enable the local APIC only if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) * "lapic" was specified.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) if (!force_enable_local_apic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) pr_info("Local APIC disabled by BIOS -- "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) "you can enable it with \"lapic\"\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) if (apic_force_enable(APIC_DEFAULT_PHYS_BASE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) if (apic_verify())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) apic_pm_activate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) no_apic:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) pr_info("No local APIC present or hardware disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) * init_apic_mappings - initialize APIC mappings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) void __init init_apic_mappings(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) unsigned int new_apicid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) if (apic_validate_deadline_timer())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) pr_info("TSC deadline timer available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) if (x2apic_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) boot_cpu_physical_apicid = read_apic_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) /* If no local APIC can be found, disable the APIC facility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) if (!smp_found_config && detect_init_APIC()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) /* Let's NOP'ify the APIC operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) pr_info("APIC: disable apic facility\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) apic_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) apic_phys = mp_lapic_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) * If the system has ACPI MADT tables or MP info, the LAPIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) * address is already registered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (!acpi_lapic && !smp_found_config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) register_lapic_address(apic_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) * Fetch the APIC ID of the BSP in case we have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) * default configuration (or the MP table is broken).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) new_apicid = read_apic_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (boot_cpu_physical_apicid != new_apicid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) boot_cpu_physical_apicid = new_apicid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) * We may record a bogus apic_version here if the APIC was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) * disabled via a boot option, but that is not a problem for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) * an SMP-compiled kernel: apic_intr_mode_select() is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) * prepared for such a case and disables SMP mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) void __init register_lapic_address(unsigned long address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) mp_lapic_addr = address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) if (!x2apic_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) set_fixmap_nocache(FIX_APIC_BASE, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) APIC_BASE, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) if (boot_cpu_physical_apicid == -1U) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) boot_cpu_physical_apicid = read_apic_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) * Local APIC interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) * spurious_interrupt - Catch all for interrupts raised on unused vectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) * @regs: Pointer to pt_regs on stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) * @vector: The vector number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) * This is invoked from the ASM entry code to catch all interrupts that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) * arrive on a vector routed to the common_spurious idtentry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) * point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) * Also called from sysvec_spurious_apic_interrupt().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) DEFINE_IDTENTRY_IRQ(spurious_interrupt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) u32 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) trace_spurious_apic_entry(vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) inc_irq_stat(irq_spurious_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) * If this is a spurious interrupt then do not acknowledge it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) if (vector == SPURIOUS_APIC_VECTOR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) /* See SDM vol 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) pr_info("Spurious APIC interrupt (vector 0xFF) on CPU#%d, should never happen.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) * If it is a vectored one, verify it's set in the ISR. If set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) * acknowledge it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) if (v & (1 << (vector & 0x1f))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) vector, smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) ack_APIC_irq();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) vector, smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) trace_spurious_apic_exit(vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) }
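
/*
 * Editor's note: illustrative sketch, not part of this file.  The ISR
 * lookup above relies on the ISR being an array of eight 32-bit
 * registers spaced 0x10 bytes apart, each covering 32 vectors.  The
 * following restates the offset/bit arithmetic for a given vector:
 */
static inline void example_isr_slot(unsigned int vector,
				    unsigned int *reg_offset, unsigned int *bit)
{
	*reg_offset = (vector & ~0x1f) >> 1;	/* byte offset from APIC_ISR */
	*bit        = vector & 0x1f;		/* bit within that register  */
}
/* e.g. vector 0xec -> register at APIC_ISR + 0x70, bit 12 */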
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) DEFINE_IDTENTRY_SYSVEC(sysvec_spurious_apic_interrupt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) __spurious_interrupt(regs, SPURIOUS_APIC_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) * This interrupt should never happen with our APIC/SMP architecture
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) DEFINE_IDTENTRY_SYSVEC(sysvec_error_interrupt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) static const char * const error_interrupt_reason[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) "Send CS error", /* APIC Error Bit 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) "Receive CS error", /* APIC Error Bit 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) "Send accept error", /* APIC Error Bit 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) "Receive accept error", /* APIC Error Bit 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) "Redirectable IPI", /* APIC Error Bit 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) "Send illegal vector", /* APIC Error Bit 5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) "Received illegal vector", /* APIC Error Bit 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) "Illegal register address", /* APIC Error Bit 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) u32 v, i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) trace_error_apic_entry(ERROR_APIC_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) /* First tickle the hardware, only then report what went on. -- REW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) if (lapic_get_maxlvt() > 3) /* Due to the Pentium erratum 3AP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) apic_write(APIC_ESR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) v = apic_read(APIC_ESR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) ack_APIC_irq();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) atomic_inc(&irq_err_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) smp_processor_id(), v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) v &= 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) while (v) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) if (v & 0x1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) v >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) apic_printk(APIC_DEBUG, KERN_CONT "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) trace_error_apic_exit(ERROR_APIC_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
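
/*
 * Editor's note: illustrative sketch, not part of this file.  A
 * stand-alone restatement of the ESR decode loop above: it collects the
 * indices of the set bits, which index into error_interrupt_reason[].
 */
static inline unsigned int example_decode_esr(u32 esr, unsigned int reasons[8])
{
	unsigned int i, n = 0;

	for (i = 0; i < 8; i++) {
		if (esr & (1U << i))
			reasons[n++] = i;
	}
	return n;
}
/* e.g. ESR = 0x44 -> bits 2 and 6: "Send accept error", "Received illegal vector" */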
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) * connect_bsp_APIC - attach the APIC to the interrupt system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) static void __init connect_bsp_APIC(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if (pic_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) * Do not trust the local APIC to be empty at bootup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) clear_local_APIC();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) * In PIC mode, enable APIC mode in the IMCR, i.e. connect the BSP's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) * local APIC to the INT and NMI lines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) apic_printk(APIC_VERBOSE, "leaving PIC mode, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) "enabling APIC mode.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) imcr_pic_to_apic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) * disconnect_bsp_APIC - detach the APIC from the interrupt system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) * @virt_wire_setup: indicates whether virtual wire mode is selected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) * Virtual wire mode is necessary to deliver legacy interrupts even when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) * APIC is disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) void disconnect_bsp_APIC(int virt_wire_setup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) unsigned int value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) if (pic_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) * Put the board back into PIC mode (has an effect only on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) * certain older boards). Note that APIC interrupts, including
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) * IPIs, won't work beyond this point! The only exception is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) * INIT IPIs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) apic_printk(APIC_VERBOSE, "disabling APIC mode, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) "entering PIC mode.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) imcr_apic_to_pic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) /* Go back to Virtual Wire compatibility mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) /* For the spurious interrupt use vector 0xF, and enable it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) value = apic_read(APIC_SPIV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) value &= ~APIC_VECTOR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) value |= APIC_SPIV_APIC_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) value |= 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) apic_write(APIC_SPIV, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) if (!virt_wire_setup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) * For LVT0 make it edge triggered, active high,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) * external and enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) value = apic_read(APIC_LVT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) apic_write(APIC_LVT0, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) /* Disable LVT0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) apic_write(APIC_LVT0, APIC_LVT_MASKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) * For LVT1 make it edge triggered, active high,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) * nmi and enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) value = apic_read(APIC_LVT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) apic_write(APIC_LVT1, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) * The number of allocated logical CPU IDs. Since logical CPU IDs are allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) * contiguously, it equals the currently allocated maximum logical CPU ID plus 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) * All allocated CPU IDs should be in the [0, nr_logical_cpuids) range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) * so the maximum of nr_logical_cpuids is nr_cpu_ids.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) * NOTE: Reserve 0 for BSP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) static int nr_logical_cpuids = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) * Used to store mapping between logical CPU IDs and APIC IDs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) static int cpuid_to_apicid[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) [0 ... NR_CPUS - 1] = -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) return phys_id == cpuid_to_apicid[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) * @apicid: APIC ID to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) bool apic_id_is_primary_thread(unsigned int apicid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) if (smp_num_siblings == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) /* Isolate the SMT bit(s) in the APICID and check for 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) mask = (1U << (fls(smp_num_siblings) - 1)) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) return !(apicid & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) #endif
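
/*
 * Editor's note: illustrative sketch, not part of this file.  A
 * restatement of apic_id_is_primary_thread() with the sibling count
 * passed in explicitly (a hypothetical helper): the low
 * log2(nr_siblings) bits of the APIC ID select the SMT thread, so an
 * APIC ID belongs to a primary thread iff those bits are all zero.
 */
static inline bool example_is_primary_thread(unsigned int apicid,
					     unsigned int nr_siblings)
{
	unsigned int mask;

	if (nr_siblings == 1)
		return true;
	mask = (1U << (fls(nr_siblings) - 1)) - 1;
	return !(apicid & mask);
}
/* e.g. 2 siblings: APIC IDs 0, 2, 4, ... are primary; 1, 3, 5, ... are not */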
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) * Use this API to allocate logical CPU IDs so that nr_logical_cpuids
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) * and cpuid_to_apicid[] stay synchronized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) static int allocate_logical_cpuid(int apicid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) * The cpuid <-> apicid mapping is persistent, so when a CPU comes up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) * check whether the kernel has already allocated a cpuid for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) for (i = 0; i < nr_logical_cpuids; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) if (cpuid_to_apicid[i] == apicid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) /* Allocate a new cpuid. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) if (nr_logical_cpuids >= nr_cpu_ids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %u reached. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) "Processor %d/0x%x and the rest are ignored.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) nr_cpu_ids, nr_logical_cpuids, apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) cpuid_to_apicid[nr_logical_cpuids] = apicid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) return nr_logical_cpuids++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) }
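
/*
 * Editor's note: illustrative sketch, not part of this file.  A
 * stand-alone model of allocate_logical_cpuid() with the table, its
 * current size and its capacity passed in explicitly (hypothetical
 * parameters): known APIC IDs reuse their cpuid, new ones are appended
 * first-come, first-served.
 */
static inline int example_allocate_cpuid(int *table, int *nr, int max, int apicid)
{
	int i;

	for (i = 0; i < *nr; i++) {
		if (table[i] == apicid)
			return i;		/* already known: reuse it */
	}
	if (*nr >= max)
		return -1;			/* table full */
	table[*nr] = apicid;
	return (*nr)++;
}
/* e.g. starting empty, registering APIC IDs 10, 20, 10, 30 yields 0, 1, 0, 2 */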
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) int generic_processor_info(int apicid, int version)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) int cpu, max = nr_cpu_ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) phys_cpu_present_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) * boot_cpu_physical_apicid is designed to hold the apicid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) * returned by read_apic_id(), i.e. the apicid of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) * currently booting-up processor. However, on some platforms
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) * it is temporarily overwritten with the apicid reported as BSP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) * through the MP table. Concretely:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) * - arch/x86/kernel/mpparse.c: MP_processor_info()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) * - arch/x86/mm/amdtopology.c: amd_numa_init()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) * This function is executed with that modified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) * boot_cpu_physical_apicid, so the disabled_cpu_apicid kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) * parameter cannot disable APs on the kdump second kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) * Since fixing the handling of boot_cpu_physical_apicid requires
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) * further discussion and testing on each platform, leave it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) * as is for now and use read_apic_id() directly in this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) * function, generic_processor_info().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) if (disabled_cpu_apicid != BAD_APICID &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) disabled_cpu_apicid != read_apic_id() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) disabled_cpu_apicid == apicid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) int thiscpu = num_processors + disabled_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) pr_warn("APIC: Disabling requested cpu."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) " Processor %d/0x%x ignored.\n", thiscpu, apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) disabled_cpus++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) * If the boot CPU has not been detected yet, only allow up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) * nr_cpu_ids - 1 processors and keep one slot free for the boot CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) if (!boot_cpu_detected && num_processors >= nr_cpu_ids - 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) apicid != boot_cpu_physical_apicid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) int thiscpu = max + disabled_cpus - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) pr_warn("APIC: NR_CPUS/possible_cpus limit of %i almost"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) " reached. Keeping one slot for boot cpu."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) " Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) disabled_cpus++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) if (num_processors >= nr_cpu_ids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) int thiscpu = max + disabled_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) pr_warn("APIC: NR_CPUS/possible_cpus limit of %i reached. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) "Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) disabled_cpus++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) if (apicid == boot_cpu_physical_apicid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) * x86_bios_cpu_apicid is required to have processors listed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) * in the same order as logical CPU numbers. Hence the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) * entry is the BSP, and so on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) * boot_cpu_init() has already set bit 0 in cpu_present_mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) * for the BSP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) cpu = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) /* Logical cpuid 0 is reserved for BSP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) cpuid_to_apicid[0] = apicid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) cpu = allocate_logical_cpuid(apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) if (cpu < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) disabled_cpus++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) * Validate version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) if (version == 0x0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) pr_warn("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) cpu, apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) version = 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) if (version != boot_cpu_apic_version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) pr_warn("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) boot_cpu_apic_version, cpu, version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) if (apicid > max_physical_apicid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) max_physical_apicid = apicid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) #if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) apic->x86_32_early_logical_apicid(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) set_cpu_possible(cpu, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) physid_set(apicid, phys_cpu_present_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) set_cpu_present(cpu, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) num_processors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) return cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)
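/* Return the physical APIC id of the CPU we are currently running on. */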
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) int hard_smp_processor_id(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) return read_apic_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) * Override the generic EOI implementation with an optimized version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) * Only called during early boot when only one CPU is active and with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) * interrupts disabled, so we know this does not race with actual APIC driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) * use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) struct apic **drv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) /* Should happen once for each APIC driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) WARN_ON((*drv)->eoi_write == eoi_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) (*drv)->native_eoi_write = (*drv)->eoi_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) (*drv)->eoi_write = eoi_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
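/*
 * Bring the boot CPU's APIC bookkeeping in line with the CPU we are
 * actually running on: program the hardware APIC id on 64-bit, re-read
 * it on 32-bit kdump kernels, and mark it in phys_cpu_present_map.
 */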
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) static void __init apic_bsp_up_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) apic_write(APIC_ID, apic->set_apic_id(boot_cpu_physical_apicid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) * Hack: In the kdump case, after a crash, the kernel might be booting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) * on a CPU with a non-zero LAPIC id, but boot_cpu_physical_apicid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) * might be zero if it was read from the MP tables. Get it from the LAPIC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) # ifdef CONFIG_CRASH_DUMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) boot_cpu_physical_apicid = read_apic_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) # endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) * apic_bsp_setup - Setup function for local apic and io-apic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) * @upmode: Force UP mode (for APIC_init_uniprocessor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) static void __init apic_bsp_setup(bool upmode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) connect_bsp_APIC();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) if (upmode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) apic_bsp_up_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) setup_local_APIC();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) enable_IO_APIC();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) end_local_APIC_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) irq_remap_enable_fault_handling();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) setup_IO_APIC();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) lapic_update_legacy_vectors();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) #ifdef CONFIG_UP_LATE_INIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) void __init up_late_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) if (apic_intr_mode == APIC_PIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) /* Setup local timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) x86_init.timers.setup_percpu_clockev();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) * Power management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) static struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) * 'active' is true if the local APIC was enabled by us and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) * not the BIOS; this signifies that we are also responsible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) * for disabling it before entering apm/acpi suspend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) int active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) /* r/w apic fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) unsigned int apic_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) unsigned int apic_taskpri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) unsigned int apic_ldr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) unsigned int apic_dfr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) unsigned int apic_spiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) unsigned int apic_lvtt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) unsigned int apic_lvtpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) unsigned int apic_lvt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) unsigned int apic_lvt1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) unsigned int apic_lvterr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) unsigned int apic_tmict;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) unsigned int apic_tdcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) unsigned int apic_thmr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) unsigned int apic_cmci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) } apic_pm_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
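/*
 * Syscore suspend hook: save the local APIC register state, then mask
 * the IO-APIC entries, disable the local APIC and disable interrupt
 * remapping.
 */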
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) static int lapic_suspend(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) int maxlvt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) if (!apic_pm_state.active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) maxlvt = lapic_get_maxlvt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) apic_pm_state.apic_id = apic_read(APIC_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) apic_pm_state.apic_ldr = apic_read(APIC_LDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) apic_pm_state.apic_dfr = apic_read(APIC_DFR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) if (maxlvt >= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) #ifdef CONFIG_X86_THERMAL_VECTOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) if (maxlvt >= 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) #ifdef CONFIG_X86_MCE_INTEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) if (maxlvt >= 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) apic_pm_state.apic_cmci = apic_read(APIC_LVTCMCI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) * Mask IOAPIC before disabling the local APIC to prevent stale IRR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) * entries on some implementations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) mask_ioapic_entries();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) disable_local_APIC();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) irq_remapping_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)
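/*
 * Syscore resume hook: re-enable x2APIC (or reprogram APICBASE), then
 * restore the register state saved by lapic_suspend() and re-enable
 * interrupt remapping.
 */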
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) static void lapic_resume(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) unsigned int l, h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) int maxlvt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) if (!apic_pm_state.active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) * IO-APIC and PIC have their own resume routines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) * We just mask them here to make sure the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) * subsystem is completely quiet while we enable x2apic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) * and interrupt-remapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) mask_ioapic_entries();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) legacy_pic->mask_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) if (x2apic_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) __x2apic_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) * Make sure the APICBASE points to the right address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) * FIXME! This will be wrong if we ever support suspend on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) * SMP! We'll need to do this as part of the CPU restore!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) if (boot_cpu_data.x86 >= 6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) rdmsr(MSR_IA32_APICBASE, l, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) l &= ~MSR_IA32_APICBASE_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) wrmsr(MSR_IA32_APICBASE, l, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) maxlvt = lapic_get_maxlvt();
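/*
 * Keep LVTERR masked while the other registers are restored so that no
 * APIC error interrupt fires mid-restore; the saved LVTERR value is
 * written back further down.
 */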
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) apic_write(APIC_ID, apic_pm_state.apic_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) apic_write(APIC_DFR, apic_pm_state.apic_dfr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) apic_write(APIC_LDR, apic_pm_state.apic_ldr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) #ifdef CONFIG_X86_THERMAL_VECTOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) if (maxlvt >= 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) #ifdef CONFIG_X86_MCE_INTEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) if (maxlvt >= 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) apic_write(APIC_LVTCMCI, apic_pm_state.apic_cmci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) if (maxlvt >= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) apic_write(APIC_ESR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) apic_read(APIC_ESR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) apic_write(APIC_ESR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) apic_read(APIC_ESR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) irq_remapping_reenable(x2apic_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) * This device has no shutdown method - fully functioning local APICs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) * are needed on every CPU up until machine_halt/restart/poweroff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) static struct syscore_ops lapic_syscore_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) .resume = lapic_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) .suspend = lapic_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712)
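/*
 * Record that the kernel (not the BIOS) enabled the local APIC, so
 * lapic_suspend()/lapic_resume() will save and restore its state.
 */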
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) static void apic_pm_activate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) apic_pm_state.active = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) static int __init init_lapic_sysfs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) if (boot_cpu_has(X86_FEATURE_APIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) register_syscore_ops(&lapic_syscore_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) /* The local APIC needs to resume before other devices access its registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) core_initcall(init_lapic_sysfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) #else /* CONFIG_PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) static void apic_pm_activate(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) #endif /* CONFIG_PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) static int multi_checked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) static int multi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)
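/* DMI callback: flag the machine as a multi-chassis ("clustered") box. */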
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) static int set_multi(const struct dmi_system_id *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) if (multi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) pr_info("APIC: %s detected, Multi Chassis\n", d->ident);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) multi = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) static const struct dmi_system_id multi_dmi_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) .callback = set_multi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) .ident = "IBM System Summit2",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) .matches = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) static void dmi_check_multi(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) if (multi_checked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) dmi_check_system(multi_dmi_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) multi_checked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) * apic_is_clustered_box() -- Check if we can expect a well-synchronized TSC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) * Thus far, the major user of this is IBM's Summit2 series:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) * clustered multi-chassis boxes may have unsynchronized TSCs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) * Use DMI to detect them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) int apic_is_clustered_box(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) dmi_check_multi();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) return multi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) * APIC command line parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) */
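/* "disableapic": do not use the local APIC at all. */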
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) static int __init setup_disableapic(char *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) disable_apic = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) setup_clear_cpu_cap(X86_FEATURE_APIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) early_param("disableapic", setup_disableapic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) /* same as disableapic, for compatibility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) static int __init setup_nolapic(char *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) return setup_disableapic(arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) early_param("nolapic", setup_nolapic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803)
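/* "lapic_timer_c2_ok": trust the local APIC timer to keep ticking in the C2 power state. */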
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) static int __init parse_lapic_timer_c2_ok(char *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) local_apic_timer_c2_ok = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810)
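/* "noapictimer" and "nolapic_timer" (below) both disable the local APIC timer. */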
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) static int __init parse_disable_apic_timer(char *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) disable_apic_timer = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) early_param("noapictimer", parse_disable_apic_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) static int __init parse_nolapic_timer(char *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) disable_apic_timer = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) early_param("nolapic_timer", parse_nolapic_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824)
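/*
 * "apic=debug" / "apic=verbose": raise APIC log verbosity.
 * On 64-bit, a bare "apic" (no value) also re-enables IO-APIC setup.
 */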
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) static int __init apic_set_verbosity(char *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) if (!arg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) skip_ioapic_setup = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) if (strcmp("debug", arg) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) apic_verbosity = APIC_DEBUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) else if (strcmp("verbose", arg) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) apic_verbosity = APIC_VERBOSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) pr_warn("APIC verbosity level %s not recognised, use apic=verbose or apic=debug\n", arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) early_param("apic", apic_set_verbosity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) static int __init lapic_insert_resource(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) if (!apic_phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) /* Put local APIC into the resource map. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) lapic_resource.start = apic_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) insert_resource(&iomem_resource, &lapic_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) * This must run after e820__reserve_resources(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) * which uses request_resource().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) late_initcall(lapic_insert_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869)
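/* "disable_cpu_apicid=N": ignore the CPU with this APIC id (see generic_processor_info()). */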
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) static int __init apic_set_disabled_cpu_apicid(char *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) if (!arg || !get_option(&arg, &disabled_cpu_apicid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) early_param("disable_cpu_apicid", apic_set_disabled_cpu_apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878)
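/* "apic_extnmi={bsp|all|none}": control which CPUs have the external NMI (LINT1) enabled. */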
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) static int __init apic_set_extnmi(char *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) if (!arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) if (!strncmp("all", arg, 3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) apic_extnmi = APIC_EXTNMI_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) else if (!strncmp("none", arg, 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) apic_extnmi = APIC_EXTNMI_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) else if (!strncmp("bsp", arg, 3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) apic_extnmi = APIC_EXTNMI_BSP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) pr_warn("Unknown external NMI delivery mode `%s' ignored\n", arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) early_param("apic_extnmi", apic_set_extnmi);