// SPDX-License-Identifier: GPL-2.0-only
/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>
#include <linux/pgtable.h>

#include <asm/cmdline.h>
#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/doublefault.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mtrr.h>
#include <asm/hwcap2.h>
#include <linux/numa.h>
#include <asm/numa.h>
#include <asm/asm.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/memtype.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/uv/uv.h>

#include "cpu.h"

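/* Bits exposed to user space via the AT_HWCAP2 ELF auxiliary vector. */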
u32 elf_hwcap2 __read_mostly;

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too
	 * IRET will check the segment types  kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code and data segments have fixed 64k limits,
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

#ifdef CONFIG_X86_64
static int __init x86_nopcid_setup(char *s)
{
	/* nopcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_PCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_PCID);
	pr_info("nopcid: PCID feature disabled\n");
	return 0;
}
early_param("nopcid", x86_nopcid_setup);
#endif

static int __init x86_noinvpcid_setup(char *s)
{
	/* noinvpcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_INVPCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
	pr_info("noinvpcid: INVPCID feature disabled\n");
	return 0;
}
early_param("noinvpcid", x86_noinvpcid_setup);

#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
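	/*
	 * Save EFLAGS, flip the requested bit, write it back, then
	 * re-read EFLAGS: if the bit stuck, the flag is changeable.
	 * The final popfl restores the original EFLAGS.
	 */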
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0		\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
int have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

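	/* Setting bit 21 (0x200000) of MSR_IA32_BBL_CR_CTL turns off PSN reporting: */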
	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	pr_notice("CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static __init int setup_disable_smep(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMEP);
	return 1;
}
__setup("nosmep", setup_disable_smep);

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		cr4_set_bits(X86_CR4_SMEP);
}

static __init int setup_disable_smap(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMAP);
	return 1;
}
__setup("nosmap", setup_disable_smap);

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags = native_save_fl();

	/* This should have been cleared long ago */
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
		cr4_set_bits(X86_CR4_SMAP);
#else
		clear_cpu_cap(c, X86_FEATURE_SMAP);
		cr4_clear_bits(X86_CR4_SMAP);
#endif
	}
}

static __always_inline void setup_umip(struct cpuinfo_x86 *c)
{
	/* Check the boot processor, plus build option for UMIP. */
	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
		goto out;

	/* Check the current processor's cpuid bits. */
	if (!cpu_has(c, X86_FEATURE_UMIP))
		goto out;

	cr4_set_bits(X86_CR4_UMIP);

	pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");

	return;

out:
	/*
	 * Make sure UMIP is disabled in case it was enabled in a
	 * previous boot (e.g., via kexec).
	 */
	cr4_clear_bits(X86_CR4_UMIP);
}

/* These bits should not change their value after CPU init is finished. */
static const unsigned long cr4_pinned_mask =
	X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE;
static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
static unsigned long cr4_pinned_bits __ro_after_init;

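/*
 * Write CR0 first and verify afterwards: if the pinned WP bit was
 * cleared in the written value, it is restored immediately and only
 * then do we warn, so the unprotected window stays minimal.
 */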
void native_write_cr0(unsigned long val)
{
	unsigned long bits_missing = 0;

set_register:
	asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");

	if (static_branch_likely(&cr_pinning)) {
		if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
			bits_missing = X86_CR0_WP;
			val |= bits_missing;
			goto set_register;
		}
		/* Warn after we've set the missing bits. */
		WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n");
	}
}
EXPORT_SYMBOL(native_write_cr0);

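/* Same write-then-verify dance as native_write_cr0(), for all of cr4_pinned_mask. */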
void native_write_cr4(unsigned long val)
{
	unsigned long bits_changed = 0;

set_register:
	asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");

	if (static_branch_likely(&cr_pinning)) {
		if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
			bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
			val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
			goto set_register;
		}
		/* Warn after we've corrected the changed bits. */
		WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
			  bits_changed);
	}
}
#if IS_MODULE(CONFIG_LKDTM)
EXPORT_SYMBOL_GPL(native_write_cr4);
#endif

void cr4_update_irqsoff(unsigned long set, unsigned long clear)
{
	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);

	lockdep_assert_irqs_disabled();

	newval = (cr4 & ~clear) | set;
	if (newval != cr4) {
		this_cpu_write(cpu_tlbstate.cr4, newval);
		__write_cr4(newval);
	}
}
EXPORT_SYMBOL(cr4_update_irqsoff);

/* Read the CR4 shadow. */
unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
EXPORT_SYMBOL_GPL(cr4_read_shadow);

void cr4_init(void)
{
	unsigned long cr4 = __read_cr4();

	if (boot_cpu_has(X86_FEATURE_PCID))
		cr4 |= X86_CR4_PCIDE;
	if (static_branch_likely(&cr_pinning))
		cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;

	__write_cr4(cr4);

	/* Initialize cr4 shadow for this CPU. */
	this_cpu_write(cpu_tlbstate.cr4, cr4);
}

/*
 * Once CPU feature detection is finished (and boot params have been
 * parsed), record any of the sensitive CR bits that are set, and
 * enable CR pinning.
 */
static void __init setup_cr_pinning(void)
{
	cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
	static_key_enable(&cr_pinning.key);
}

static __init int x86_nofsgsbase_setup(char *arg)
{
	/* Require an exact match without trailing characters. */
	if (strlen(arg))
		return 0;

	/* Do not emit a message if the feature is not present. */
	if (!boot_cpu_has(X86_FEATURE_FSGSBASE))
		return 1;

	setup_clear_cpu_cap(X86_FEATURE_FSGSBASE);
	pr_info("FSGSBASE disabled via kernel command line\n");
	return 1;
}
__setup("nofsgsbase", x86_nofsgsbase_setup);

/*
 * Protection Keys are not available in 32-bit mode.
 */
static bool pku_disabled;

static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
	struct pkru_state *pk;

	/* check the boot processor, plus compile options for PKU: */
	if (!cpu_feature_enabled(X86_FEATURE_PKU))
		return;
	/* checks the actual processor's cpuid bits: */
	if (!cpu_has(c, X86_FEATURE_PKU))
		return;
	if (pku_disabled)
		return;

	cr4_set_bits(X86_CR4_PKE);
	pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
	if (pk)
		pk->pkru = init_pkru_value;
	/*
	 * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
	 * cpuid bit to be set.  We need to ensure that we
	 * update that bit in this CPU's "cpu_info".
	 */
	set_cpu_cap(c, X86_FEATURE_OSPKE);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static __init int setup_disable_pku(char *arg)
{
	/*
	 * Do not clear the X86_FEATURE_PKU bit.  All of the
	 * runtime checks are against OSPKE so clearing the
	 * bit does nothing.
	 *
	 * This way, we will see "pku" in cpuinfo, but not
	 * "ospke", which is exactly what we want.  It shows
	 * that the CPU has PKU, but the OS has not enabled it.
	 * This happens to be exactly how a system would look
	 * if we disabled the config option.
	 */
	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
	pku_disabled = true;
	return 1;
}
__setup("nopku", setup_disable_pku);
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,	0x00000005 },
	{ X86_FEATURE_DCA,	0x00000009 },
	{ X86_FEATURE_XSAVE,	0x0000000d },
	{ 0, 0 }
};

static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
			x86_cap_flag(df->feature), df->level);
	}
}

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name itself; in particular, if CPUID levels 0x80000002..4 are
 * supported, this isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	const struct legacy_cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->legacy_models;

	while (info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
#endif
	return NULL;		/* Not found */
}

/* Aligned to unsigned long to avoid split lock in atomic bitmap ops */
__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
__u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else
	__loadsegment_simple(gs, 0);
	wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
#endif
	load_stack_canary_segment();
}

#ifdef CONFIG_X86_32
/* The 32-bit entry code needs to find cpu_entry_area. */
DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
#endif

/* Load the original GDT from the per-cpu structure */
void load_direct_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_direct_gdt);

/* Load a fixmap remapping of the per-cpu GDT */
void load_fixmap_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_fixmap_gdt);

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
	/* Load the original GDT */
	load_direct_gdt(cpu);
	/* Reload the per-cpu base */
	load_percpu_segment(cpu);
}

static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q, *s;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Trim whitespace */
	p = q = s = &c->x86_model_id[0];

	while (*p == ' ')
		p++;

	while (*p) {
		/* Note the last non-whitespace index */
		if (!isspace(*p))
			s = q;

		*q++ = *p++;
	}

	*(s + 1) = '\0';
}

void detect_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	c->x86_max_cores = 1;
	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
		return;

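	/*
	 * Leaf 4, subleaf 0: EAX[4:0] is the cache type (0 = no more
	 * caches); EAX[31:26] is the maximum core ID in the package,
	 * so adding 1 yields the core count.
	 */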
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		c->x86_max_cores = (eax >> 26) + 1;
}

void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

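	/* Leaf 0x80000005: L1D size is ECX[31:24] and L1I size is EDX[31:24], in KB. */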
	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

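	/* Leaf 0x80000006: ECX[31:16] is the L2 cache size in KB. */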
	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
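	/* Sum the 4K data (EBX[27:16]) and instruction (EBX[11:0]) L2 TLB entry counts. */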
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->legacy_cache_size)
		l2size = this_cpu->legacy_cache_size(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}

u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];

static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
	if (this_cpu->c_detect_tlb)
		this_cpu->c_detect_tlb(c);

	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
		tlb_lli_4m[ENTRIES]);

	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}

int detect_ht_early(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;

	if (!cpu_has(c, X86_FEATURE_HT))
		return -1;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return -1;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return -1;

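	/* CPUID leaf 1: EBX[23:16] is the number of logical processors per package. */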
	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;
	if (smp_num_siblings == 1)
		pr_info_once("CPU0: Hyper-Threading is disabled\n");
#endif
	return 0;
}

void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	int index_msb, core_bits;

	if (detect_ht_early(c) < 0)
		return;

	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

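	/*
	 * So far smp_num_siblings counts all threads in the package;
	 * divide by the core count to get threads per core.
	 */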
	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
		((1 << core_bits) - 1);
#endif
}

static void get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
		    "CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

void cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) (unsigned int *)&c->x86_vendor_id[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) (unsigned int *)&c->x86_vendor_id[8],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) (unsigned int *)&c->x86_vendor_id[4]);
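	/*
	 * The 12-byte vendor string (e.g. "GenuineIntel") is returned in
	 * EBX, EDX, ECX order, which is why EBX lands at offset 0, EDX at
	 * offset 4 and ECX at offset 8 above.
	 */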

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = x86_family(tfms);
		c->x86_model = x86_model(tfms);
		c->x86_stepping = x86_stepping(tfms);

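		/*
		 * CPUID.1:EDX bit 19 advertises CLFLUSH; when set,
		 * EBX[15:8] holds the flush line size in 8-byte units.
		 */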
		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}

static void apply_forced_caps(struct cpuinfo_x86 *c)
{
	int i;

	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}
}

static void init_speculation_control(struct cpuinfo_x86 *c)
{
	/*
	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
	 * and they also have a different bit for STIBP support. Also,
	 * a hypervisor might have set the individual AMD bits even on
	 * Intel CPUs, for finer-grained selection of what's available.
	 */
	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_IBPB);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
		set_cpu_cap(c, X86_FEATURE_STIBP);

	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
		set_cpu_cap(c, X86_FEATURE_SSBD);

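	/*
	 * The AMD_* spec-ctrl bits below are enumerated separately (in
	 * CPUID leaf 0x80000008 EBX) but map onto the same synthetic
	 * features as their Intel counterparts.
	 */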
	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
		set_cpu_cap(c, X86_FEATURE_IBPB);

	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
		set_cpu_cap(c, X86_FEATURE_STIBP);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
		set_cpu_cap(c, X86_FEATURE_SSBD);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
	}
}

void get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

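	/*
	 * Each CPUID output register of interest is captured wholesale into
	 * one 32-bit word of c->x86_capability[], indexed by the CPUID_*
	 * leaf enums; individual feature bits are then tested via
	 * cpu_has()/boot_cpu_has().
	 */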
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_1_ECX] = ecx;
		c->x86_capability[CPUID_1_EDX] = edx;
	}

	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
	if (c->cpuid_level >= 0x00000006)
		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_7_0_EBX] = ebx;
		c->x86_capability[CPUID_7_ECX] = ecx;
		c->x86_capability[CPUID_7_EDX] = edx;

		/* Check valid sub-leaf index before accessing it */
		if (eax >= 1) {
			cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx);
			c->x86_capability[CPUID_7_1_EAX] = eax;
		}
	}

	/* Extended state features: level 0x0000000d */
	if (c->cpuid_level >= 0x0000000d) {
		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_D_1_EAX] = eax;
	}

	/* AMD-defined flags: level 0x80000001 */
	eax = cpuid_eax(0x80000000);
	c->extended_cpuid_level = eax;

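	/*
	 * CPUID 0x80000000 returns the highest extended leaf in EAX, but on
	 * CPUs without extended leaves the returned value is not well
	 * defined, so validate the 0x8000xxxx signature before using it.
	 */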
	if ((eax & 0xffff0000) == 0x80000000) {
		if (eax >= 0x80000001) {
			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);

			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
			c->x86_capability[CPUID_8000_0001_EDX] = edx;
		}
	}

	if (c->extended_cpuid_level >= 0x80000007) {
		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
		c->x86_power = edx;
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
	}

	if (c->extended_cpuid_level >= 0x8000000a)
		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);

	init_scattered_cpuid_features(c);
	init_speculation_control(c);

	/*
	 * Clear/Set all flags overridden by options, after probe.
	 * This needs to happen each time we re-probe, which may happen
	 * several times during CPU initialization.
	 */
	apply_forced_caps(c);
}

void get_cpu_address_sizes(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
#ifdef CONFIG_X86_32
	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
		c->x86_phys_bits = 36;
#endif
	c->x86_cache_bits = c->x86_phys_bits;
}

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher.
	 * It's a 486 if we can modify the AC flag.
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}

#define NO_SPECULATION		BIT(0)
#define NO_MELTDOWN		BIT(1)
#define NO_SSB			BIT(2)
#define NO_L1TF			BIT(3)
#define NO_MDS			BIT(4)
#define MSBDS_ONLY		BIT(5)
#define NO_SWAPGS		BIT(6)
#define NO_ITLB_MULTIHIT	BIT(7)
#define NO_SPECTRE_V2		BIT(8)

#define VULNWL(vendor, family, model, whitelist)	\
	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)

#define VULNWL_INTEL(model, whitelist)		\
	VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)

#define VULNWL_AMD(family, whitelist)		\
	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)

#define VULNWL_HYGON(family, whitelist)		\
	VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)
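/*
 * Each table entry carries its whitelist flags in driver_data; e.g.
 * VULNWL_AMD(0x12, NO_MELTDOWN) matches any AMD family 0x12 model and
 * makes cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN) true on such CPUs.
 */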

static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
	VULNWL(ANY,	4, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(CENTAUR,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(INTEL,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),

	/* Intel Family 6 */
	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_SILVERMONT_D,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(CORE_YONAH,		NO_SSB),

	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_AIRMONT_NP,		NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_GOLDMONT_D,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),

	/*
	 * Technically, swapgs isn't serializing on AMD (despite it previously
	 * being documented as such in the APM).  But according to AMD, %gs is
	 * updated non-speculatively, and the issuing of %gs-relative memory
	 * operands will be blocked until the %gs update completes, which is
	 * good enough for our purposes.
	 */

	VULNWL_INTEL(ATOM_TREMONT_D,		NO_ITLB_MULTIHIT),

	/* AMD Family 0xf - 0x12 */
	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),

	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),

	/* Zhaoxin Family 7 */
	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS),
	VULNWL(ZHAOXIN,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS),
	{}
};

#define VULNBL_INTEL_STEPPINGS(model, steppings, issues)		   \
	X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6,		   \
					    INTEL_FAM6_##model, steppings, \
					    X86_FEATURE_ANY, issues)

#define SRBDS		BIT(0)

static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(HASWELL,		X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(HASWELL_L,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(HASWELL_G,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(BROADWELL_G,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(BROADWELL,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPINGS(0x0, 0xC),	SRBDS),
	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPINGS(0x0, 0xD),	SRBDS),
	{}
};

static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
{
	const struct x86_cpu_id *m = x86_match_cpu(table);

	return m && !!(m->driver_data & which);
}

u64 x86_read_arch_cap_msr(void)
{
	u64 ia32_cap = 0;

	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

	return ia32_cap;
}

static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
	u64 ia32_cap = x86_read_arch_cap_msr();

	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
	if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
	    !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);

	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
		return;

	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);

	if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2))
		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

	if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
	    !(ia32_cap & ARCH_CAP_SSB_NO) &&
	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

	if (ia32_cap & ARCH_CAP_IBRS_ALL)
		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);

	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
	    !(ia32_cap & ARCH_CAP_MDS_NO)) {
		setup_force_cpu_bug(X86_BUG_MDS);
		if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
	}

	if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
		setup_force_cpu_bug(X86_BUG_SWAPGS);

	/*
	 * When the CPU is not mitigated for TAA (TAA_NO=0), set the TAA bug
	 * when:
	 *	- TSX is supported, or
	 *	- TSX_CTRL is present.
	 *
	 * The TSX_CTRL check is needed because TSX may have been disabled
	 * before the kernel booted, e.g. across kexec.
	 * TSX_CTRL alone is not sufficient when the microcode update is
	 * missing or when running as a guest that is not offered TSX_CTRL.
	 */
	if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
	    (cpu_has(c, X86_FEATURE_RTM) ||
	     (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
		setup_force_cpu_bug(X86_BUG_TAA);

	/*
	 * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
	 * in the vulnerability blacklist.
	 */
	if ((cpu_has(c, X86_FEATURE_RDRAND) ||
	     cpu_has(c, X86_FEATURE_RDSEED)) &&
	    cpu_matches(cpu_vuln_blacklist, SRBDS))
		setup_force_cpu_bug(X86_BUG_SRBDS);

	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
		return;

	/* Rogue Data Cache Load? No! */
	if (ia32_cap & ARCH_CAP_RDCL_NO)
		return;

	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);

	if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
		return;

	setup_force_cpu_bug(X86_BUG_L1TF);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect. In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work. Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void detect_nopl(void)
{
#ifdef CONFIG_X86_32
	setup_clear_cpu_cap(X86_FEATURE_NOPL);
#else
	setup_force_cpu_cap(X86_FEATURE_NOPL);
#endif
}

/*
 * We parse cpu parameters early because fpu__init_system() is executed
 * before parse_early_param().
 */
static void __init cpu_parse_early_param(void)
{
	char arg[128];
	char *argptr = arg;
	int arglen, res, bit;

#ifdef CONFIG_X86_32
	if (cmdline_find_option_bool(boot_command_line, "no387"))
#ifdef CONFIG_MATH_EMULATION
		setup_clear_cpu_cap(X86_FEATURE_FPU);
#else
		pr_err("Option 'no387' requires CONFIG_MATH_EMULATION to be enabled.\n");
#endif

	if (cmdline_find_option_bool(boot_command_line, "nofxsr"))
		setup_clear_cpu_cap(X86_FEATURE_FXSR);
#endif

	if (cmdline_find_option_bool(boot_command_line, "noxsave"))
		setup_clear_cpu_cap(X86_FEATURE_XSAVE);

	if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
		setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);

	if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
		setup_clear_cpu_cap(X86_FEATURE_XSAVES);

	arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
	if (arglen <= 0)
		return;

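	/*
	 * "clearcpuid=" takes a comma-separated list of feature bit numbers
	 * (32 * capability word + bit). get_option() returns 0 for no
	 * number, 1 for a number ending the string, 2 for a number followed
	 * by a comma, and 3 for a number followed by a hyphen (a range,
	 * which is not supported here).
	 */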
	pr_info("Clearing CPUID bits:");
	do {
		res = get_option(&argptr, &bit);
		if (res == 0 || res == 3)
			break;

		/* If the argument was too long, the last bit may be cut off */
		if (res == 1 && arglen >= sizeof(arg))
			break;

		if (bit >= 0 && bit < NCAPINTS * 32) {
			pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
			setup_clear_cpu_cap(bit);
		}
	} while (res == 2);
	pr_cont("\n");
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, stepping,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the boot CPU.  Don't add code
 * here that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
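	/*
	 * The sizes above are conservative boot-time defaults; cpu_detect()
	 * and get_cpu_address_sizes() below refine them once CPUID is usable.
	 */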
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* Cyrix could have CPUID enabled via c_identify() */
	if (have_cpuid_p()) {
		cpu_detect(c);
		get_cpu_vendor(c);
		get_cpu_cap(c);
		get_cpu_address_sizes(c);
		setup_force_cpu_cap(X86_FEATURE_CPUID);
		cpu_parse_early_param();

		if (this_cpu->c_early_init)
			this_cpu->c_early_init(c);

		c->cpu_index = 0;
		filter_cpuid_features(c, false);

		if (this_cpu->c_bsp_init)
			this_cpu->c_bsp_init(c);
	} else {
		setup_clear_cpu_cap(X86_FEATURE_CPUID);
	}

	setup_force_cpu_cap(X86_FEATURE_ALWAYS);

	cpu_set_bug_bits(c);

	cpu_set_core_cap_bits(c);

	fpu__init_system(c);

#ifdef CONFIG_X86_32
	/*
	 * Regardless of whether PCID is enumerated, the SDM says
	 * that it can't be enabled in 32-bit mode.
	 */
	setup_clear_cpu_cap(X86_FEATURE_PCID);
#endif

	/*
	 * Later in the boot process pgtable_l5_enabled() relies on
	 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
	 * enabled by this point, we need to clear the feature bit to avoid
	 * false positives at a later stage.
	 *
	 * pgtable_l5_enabled() can be false here for several reasons:
	 *  - 5-level paging is disabled at compile time;
	 *  - it's a 32-bit kernel;
	 *  - the machine doesn't support 5-level paging;
	 *  - the user specified 'no5lvl' on the kernel command line.
	 */
	if (!pgtable_l5_enabled())
		setup_clear_cpu_cap(X86_FEATURE_LA57);

	detect_nopl();
}

void __init early_cpu_init(void)
{
	const struct cpu_dev *const *cdev;
	int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
	pr_info("KERNEL supported cpus:\n");
#endif

	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

#ifdef CONFIG_PROCESSOR_SELECT
		{
			unsigned int j;

			for (j = 0; j < 2; j++) {
				if (!cpudev->c_ident[j])
					continue;
				pr_info(" %s %s\n", cpudev->c_vendor,
					cpudev->c_ident[j]);
			}
		}
#endif
	}
	early_identify_cpu(&boot_cpu_data);
}

static bool detect_null_seg_behavior(void)
{
	/*
	 * Empirically, writing zero to a segment selector on AMD does
	 * not clear the base, whereas writing zero to a segment
	 * selector on Intel does clear the base.  Intel's behavior
	 * allows slightly faster context switches in the common case
	 * where GS is unused by the prev and next threads.
	 *
	 * Since neither vendor documents this anywhere that I can see,
	 * detect it directly instead of hardcoding the choice by
	 * vendor.
	 *
	 * I've designated AMD's behavior as the "bug" because it's
	 * counterintuitive and less friendly.
	 */

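	/*
	 * Probe sequence: plant a non-zero FS base, load a null selector
	 * into FS, and check whether the base reads back as zero (Intel
	 * behavior) or survives (AMD behavior); the old base is restored
	 * either way.
	 */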
	unsigned long old_base, tmp;

	rdmsrl(MSR_FS_BASE, old_base);
	wrmsrl(MSR_FS_BASE, 1);
	loadsegment(fs, 0);
	rdmsrl(MSR_FS_BASE, tmp);
	wrmsrl(MSR_FS_BASE, old_base);
	return tmp == 0;
}

void check_null_seg_clears_base(struct cpuinfo_x86 *c)
{
	/* BUG_NULL_SEG is only relevant with 64-bit userspace */
	if (!IS_ENABLED(CONFIG_X86_64))
		return;

	/* Zen3 CPUs advertise Null Selector Clears Base in CPUID. */
	if (c->extended_cpuid_level >= 0x80000021 &&
	    cpuid_eax(0x80000021) & BIT(6))
		return;

	/*
	 * The CPUID bit above wasn't set. If this kernel is still running
	 * as a hypervisor guest, then the hypervisor has decided not to
	 * advertise that CPUID bit for whatever reason.  For example, one
	 * member of the migration pool might be vulnerable.  Which means
	 * the bug is present: set the BUG flag and return.
	 */
	if (cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		set_cpu_bug(c, X86_BUG_NULL_SEG);
		return;
	}

	/*
	 * Zen2 CPUs also have this behaviour, but no CPUID bit.
	 * 0x18 is the respective family for Hygon.
	 */
	if ((c->x86 == 0x17 || c->x86 == 0x18) &&
	    detect_null_seg_behavior())
		return;

	/* All the remaining ones are affected */
	set_cpu_bug(c, X86_BUG_NULL_SEG);
}

static void generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* Cyrix could have CPUID enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	get_cpu_address_sizes(c);

	if (c->cpuid_level >= 0x00000001) {
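		/* The initial APIC ID is reported in CPUID.1:EBX[31:24]. */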
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) # ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) # else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) c->apicid = c->initial_apicid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) # endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) c->phys_proc_id = c->initial_apicid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) get_model_name(c); /* Default name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) * ESPFIX is a strange bug. All real CPUs have it. Paravirt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) * systems that run Linux at CPL > 0 may or may not have the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) * issue, but, even if they have the issue, there's absolutely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) * nothing we can do about it because we can't use the real IRET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) * NB: For the time being, only 32-bit kernels support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) * X86_BUG_ESPFIX as such. 64-bit kernels directly choose
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) * whether to apply espfix using paravirt hooks. If any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) * non-paravirt system ever shows up that does *not* have the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) * ESPFIX issue, we can change this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) set_cpu_bug(c, X86_BUG_ESPFIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * Validate that ACPI/mptables have the same information about the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * effective APIC id and update the package map.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) unsigned int apicid, cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) apicid = apic->cpu_present_to_apicid(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (apicid != c->apicid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) cpu, apicid, c->initial_apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) BUG_ON(topology_update_die_map(c->cpu_die_id, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) c->logical_proc_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) * This does the hard work of actually picking apart the CPU stuff...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) static void identify_cpu(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) c->loops_per_jiffy = loops_per_jiffy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) c->x86_cache_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) c->x86_vendor = X86_VENDOR_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) c->x86_model = c->x86_stepping = 0; /* So far unknown... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) c->x86_vendor_id[0] = '\0'; /* Unset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) c->x86_model_id[0] = '\0'; /* Unset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) c->x86_max_cores = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) c->x86_coreid_bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) c->cu_id = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) c->x86_clflush_size = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) c->x86_phys_bits = 36;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) c->x86_virt_bits = 48;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) c->cpuid_level = -1; /* CPUID not detected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) c->x86_clflush_size = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) c->x86_phys_bits = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) c->x86_virt_bits = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) c->x86_cache_alignment = c->x86_clflush_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) memset(&c->x86_capability, 0, sizeof(c->x86_capability));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) #ifdef CONFIG_X86_VMX_FEATURE_NAMES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) memset(&c->vmx_capability, 0, sizeof(c->vmx_capability));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) generic_identify(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (this_cpu->c_identify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) this_cpu->c_identify(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) /* Clear/Set all flags overridden by options, after probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) apply_forced_caps(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)  * Vendor-specific initialization. In this section we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)  * canonicalize the feature flags: if the CPU supports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)  * features which CPUID doesn't report, if CPUID claims
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)  * flags incorrectly, or if there are other bugs, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)  * handle them here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)  * At the end of this section, c->x86_capability had better
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)  * indicate the features this CPU genuinely supports!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (this_cpu->c_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) this_cpu->c_init(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) /* Disable the PN if appropriate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) squash_the_stupid_serial_number(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) /* Set up SMEP/SMAP/UMIP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) setup_smep(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) setup_smap(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) setup_umip(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) /* Enable FSGSBASE instructions if available. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) if (cpu_has(c, X86_FEATURE_FSGSBASE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) cr4_set_bits(X86_CR4_FSGSBASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) elf_hwcap2 |= HWCAP2_FSGSBASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
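	/*
	 * HWCAP2_FSGSBASE is reported to user space via AT_HWCAP2 in the
	 * ELF auxiliary vector, so user code can test for it before using
	 * RDFSBASE/WRFSBASE and friends directly.
	 */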
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * The vendor-specific functions might have changed features.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) * Now we do "generic changes."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) /* Filter out anything that depends on CPUID levels we don't have */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) filter_cpuid_features(c, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) /* If the model name is still unset, do table lookup. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (!c->x86_model_id[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) const char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) p = table_lookup_model(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) if (p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) strcpy(c->x86_model_id, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) /* Last resort... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) sprintf(c->x86_model_id, "%02x/%02x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) c->x86, c->x86_model);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
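	/*
	 * The fallback above yields "<family>/<model>" in hex, e.g. a
	 * family 6, model 0x9e part with no model string would be
	 * reported as "06/9e".
	 */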
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) detect_ht(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) x86_init_rdrand(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) setup_pku(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)  * Clear/Set all flags overridden by options; this needs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)  * happen before the SMP capability AND across all CPUs below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) apply_forced_caps(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) * On SMP, boot_cpu_data holds the common feature set between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) * all CPUs; so make sure that we indicate which features are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) * common between the CPUs. The first time this routine gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) * executed, c == &boot_cpu_data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) if (c != &boot_cpu_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) /* AND the already accumulated flags with these */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) for (i = 0; i < NCAPINTS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) /* OR, i.e. replicate the bug flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
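	/*
	 * Illustration of the above: if the boot CPU reports a feature
	 * bit that this AP lacks, the AND clears it from boot_cpu_data,
	 * so only features common to all CPUs stay advertised; bug bits
	 * recorded on the boot CPU are OR-ed into this AP's mask, so a
	 * bug flag is never lost on a secondary CPU.
	 */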
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) /* Init Machine Check Exception if available. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) mcheck_cpu_init(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) select_idle_routine(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) #ifdef CONFIG_NUMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) numa_add_cpu(smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) * on 32-bit kernels:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) void enable_sep_cpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) struct tss_struct *tss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) if (!boot_cpu_has(X86_FEATURE_SEP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) tss = &per_cpu(cpu_tss_rw, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) * see the big comment in struct x86_hw_tss's definition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
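	/*
	 * SYSENTER_ESP is set to the top of this CPU's entry stack:
	 * cpu_entry_stack(cpu) + 1 is one-past-the-end of the struct,
	 * i.e. the highest address of the stack, which the CPU loads
	 * into ESP on SYSENTER.
	 */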
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) tss->x86_tss.ss1 = __KERNEL_CS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) void __init identify_boot_cpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) identify_cpu(&boot_cpu_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) sysenter_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) enable_sep_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) cpu_detect_tlb(&boot_cpu_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) setup_cr_pinning();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) tsx_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) void identify_secondary_cpu(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) BUG_ON(c == &boot_cpu_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) identify_cpu(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) enable_sep_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) mtrr_ap_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) validate_apic_and_package_id(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) x86_spec_ctrl_setup_ap();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) update_srbds_msr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) static __init int setup_noclflush(char *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) __setup("noclflush", setup_noclflush);
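/*
 * Example: booting with "noclflush" on the kernel command line masks
 * both flags above, which can be useful when debugging suspected
 * cache-flush problems on a given machine.
 */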
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) void print_cpu_info(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) const char *vendor = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) if (c->x86_vendor < X86_VENDOR_NUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) vendor = this_cpu->c_vendor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) if (c->cpuid_level >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) vendor = c->x86_vendor_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) if (vendor && !strstr(c->x86_model_id, vendor))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) pr_cont("%s ", vendor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) if (c->x86_model_id[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) pr_cont("%s", c->x86_model_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) pr_cont("%d86", c->x86);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) if (c->x86_stepping || c->cpuid_level >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) pr_cont(")\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) }
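/*
 * A typical (hypothetical) line printed by the above:
 *
 *   Intel(R) Core(TM) i7 CPU (family: 0x6, model: 0x9e, stepping: 0xa)
 *
 * The vendor string is only prepended when the model string doesn't
 * already contain it.
 */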
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)  * clearcpuid= was already parsed in fpu__init_parse_early_param().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)  * But we need to keep a dummy __setup around, as otherwise it would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)  * show up as an environment variable for init.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) static __init int setup_clearcpuid(char *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) __setup("clearcpuid=", setup_clearcpuid);
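/*
 * A (hypothetical) example: booting with "clearcpuid=<capability bit>"
 * clears that feature bit; the actual parsing and validation happen in
 * the early FPU init code, as noted above, not here.
 */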
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)  * The following percpu variables are hot. Align current_task to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)  * cacheline size so that they all fall into the same cacheline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) &init_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) EXPORT_PER_CPU_SYMBOL(current_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) EXPORT_PER_CPU_SYMBOL(__preempt_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) /* May not be marked __init: used by software suspend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) void syscall_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) #ifdef CONFIG_IA32_EMULATION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)  * This only works on Intel CPUs. On AMD CPUs these MSRs are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)  * 32-bit, so the CPU truncates MSR_IA32_SYSENTER_EIP. This does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)  * not cause SYSENTER to jump to the wrong location, because AMD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)  * doesn't allow SYSENTER in long mode (either 32- or 64-bit).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) /* Flags to clear on syscall */
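	/*
	 * Each flag in this mask is cleared in RFLAGS on SYSCALL entry:
	 * notably IF, so the entry code runs with interrupts disabled,
	 * AC, so SMAP stays effective across the entry path, and TF, so
	 * a user-mode single-step trap cannot follow into the kernel.
	 */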
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) wrmsrl(MSR_SYSCALL_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) #else /* CONFIG_X86_64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) EXPORT_PER_CPU_SYMBOL(current_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) EXPORT_PER_CPU_SYMBOL(__preempt_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) * the top of the kernel stack. Use an extra percpu variable to track the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) * top of the kernel stack directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) (unsigned long)&init_thread_union + THREAD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) #ifdef CONFIG_STACKPROTECTOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) #endif /* CONFIG_X86_64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) * Clear all 6 debug registers:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) static void clear_all_debug_regs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) for (i = 0; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) /* Skip DR4/DR5: aliases of DR6/DR7, or #UD if CR4.DE is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) if ((i == 4) || (i == 5))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) set_debugreg(0, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) #ifdef CONFIG_KGDB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) * Restore debug regs if using kgdbwait and you have a kernel debugger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) * connection established.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) static void dbg_restore_debug_regs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) arch_kgdb_ops.correct_hw_break();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) #else /* ! CONFIG_KGDB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) #define dbg_restore_debug_regs()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) #endif /* ! CONFIG_KGDB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) static void wait_for_master_cpu(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)  * Wait for an ACK from the master CPU before continuing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)  * with AP initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) while (!cpumask_test_cpu(cpu, cpu_callout_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) static inline void setup_getcpu(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) struct desc_struct d = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
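	/*
	 * The CPUNODE GDT segment below encodes the CPU and node numbers
	 * in its limit field; user space can recover them cheaply with
	 * LSL on that segment (the vDSO's getcpu path), or with RDPID
	 * where MSR_TSC_AUX has been set up as done here.
	 */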
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) write_rdtscp_aux(cpudata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) /* Store CPU and node number in limit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) d.limit0 = cpudata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) d.limit1 = cpudata >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) d.type = 5; /* RO data, expand down, accessed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) d.dpl = 3; /* Visible to user code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) d.s = 1; /* Not a system segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) d.p = 1; /* Present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) d.d = 1; /* 32-bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) static inline void ucode_cpu_init(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) load_ucode_ap();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) static inline void tss_setup_ist(struct tss_struct *tss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) /* Set up the per-CPU TSS IST stacks */
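	/*
	 * Each ist[] slot holds the top of a dedicated exception stack.
	 * An IDT entry that selects IST slot n makes the CPU switch to
	 * that stack unconditionally on delivery, which keeps #DF, NMI,
	 * #DB and #MC handlers off a possibly-corrupted kernel stack.
	 */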
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) /* Only mapped when SEV-ES is active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) #else /* CONFIG_X86_64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) static inline void setup_getcpu(int cpu) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) static inline void ucode_cpu_init(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) show_ucode_info_early();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) static inline void tss_setup_ist(struct tss_struct *tss) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) #endif /* !CONFIG_X86_64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) static inline void tss_setup_io_bitmap(struct tss_struct *tss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;
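	/*
	 * IO_BITMAP_OFFSET_INVALID points past the TSS segment limit, so
	 * with no valid I/O bitmap installed, any user-space port access
	 * raises #GP until a task with ioperm() permissions is scheduled.
	 */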
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) #ifdef CONFIG_X86_IOPL_IOPERM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) tss->io_bitmap.prev_max = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) tss->io_bitmap.prev_sequence = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)  * Invalidate the extra array entry past the end of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)  * all-permission bitmap as required by the hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)  * Set up everything needed to handle exceptions from the IDT, including the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)  * IST exceptions which use paranoid_entry().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) void cpu_init_exception_handling(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) int cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) /* paranoid_entry() gets the CPU number from the GDT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) setup_getcpu(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) /* IST vectors need TSS to be set up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) tss_setup_ist(tss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) tss_setup_io_bitmap(tss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) load_TR_desc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) /* Finally load the IDT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) load_current_idt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)  * cpu_init() initializes state that is per-CPU. Some data is already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)  * initialized (naturally) in the bootstrap process, such as the GDT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)  * and IDT. We reload them nevertheless; this function acts as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)  * 'CPU state barrier', and nothing should get across.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) void cpu_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) struct task_struct *cur = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) int cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) wait_for_master_cpu(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) ucode_cpu_init(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) #ifdef CONFIG_NUMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) if (this_cpu_read(numa_node) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) early_cpu_to_node(cpu) != NUMA_NO_NODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) set_numa_node(early_cpu_to_node(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) setup_getcpu(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) pr_debug("Initializing CPU#%d\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
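	/*
	 * The baseline above drops vm86 extensions (VME), protected-mode
	 * virtual interrupts (PVI), the user-mode RDTSC restriction (TSD)
	 * and the debugging-extensions bit (DE), putting CR4 into a known
	 * state before the rest of per-CPU setup runs.
	 */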
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) * Initialize the per-CPU GDT with the boot GDT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) * and set up the GDT descriptor:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) switch_to_new_gdt(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) load_current_idt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) if (IS_ENABLED(CONFIG_X86_64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) loadsegment(fs, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) syscall_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) wrmsrl(MSR_FS_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) wrmsrl(MSR_KERNEL_GS_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) x2apic_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) mmgrab(&init_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) cur->active_mm = &init_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) BUG_ON(cur->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) initialize_tlbstate_and_flush();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) enter_lazy_tlb(&init_mm, cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) /* Initialize the TSS. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) tss_setup_ist(tss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) tss_setup_io_bitmap(tss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) load_TR_desc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) * sp0 points to the entry trampoline stack regardless of what task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) * is running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) load_mm_ldt(&init_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) clear_all_debug_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) dbg_restore_debug_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) doublefault_init_cpu_tss();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) fpu__init_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) if (is_uv_system())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) uv_cpu_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
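	/*
	 * Finally switch to the fixmap alias of the GDT; on 64-bit this
	 * mapping can be kept read-only at runtime, making the GDT a
	 * harder target for overwrites.
	 */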
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) load_fixmap_gdt(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)  * The microcode loader calls this upon late microcode load to recheck
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)  * features, but only when the microcode has actually been updated. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)  * caller holds microcode_mutex and the CPU hotplug lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) void microcode_check(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) struct cpuinfo_x86 info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) perf_check_microcode();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) /* Reload CPUID max function as it might've changed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) info.cpuid_level = cpuid_eax(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)  * Copy all capability leaves to pick up the synthetic ones so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)  * memcmp() below doesn't fail on them. The ones coming from CPUID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)  * will get overwritten in get_cpu_cap().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) get_cpu_cap(&info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) * Invoked from core CPU hotplug code after hotplug operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) void arch_smt_update(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) /* Handle the speculative execution misfeatures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) cpu_bugs_smt_update();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) /* Check whether IPI broadcasting can be enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) apic_smt_update();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) }