^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #include <linux/pgtable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/sched/clock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/thread_info.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <asm/cpufeature.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <asm/msr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <asm/bugs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <asm/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <asm/intel-family.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <asm/microcode_intel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <asm/hwcap2.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <asm/elf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <asm/cpu_device_id.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <asm/cmdline.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <asm/traps.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <asm/resctrl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <asm/numa.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/topology.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include "cpu.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #ifdef CONFIG_X86_LOCAL_APIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <asm/mpspec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <asm/apic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
/* Policy for #AC split-lock detection, selected at boot (see comment below). */
enum split_lock_detect_state {
	sld_off = 0,	/* detection disabled or unsupported (default) */
	sld_warn,	/* warn when a split lock is detected */
	sld_fatal,	/* treat a detected split lock as fatal */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) /*
 * Default to sld_off because most systems do not support split lock detection.
 * split_lock_setup() will switch this to sld_warn on systems that support
 * split lock detect, unless there is a command line override.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) static u64 msr_test_ctrl_cache __ro_after_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) * With a name like MSR_TEST_CTL it should go without saying, but don't touch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) * MSR_TEST_CTL unless the CPU is one of the whitelisted models. Writing it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) static bool cpu_model_supports_sld __ro_after_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * Processors which have self-snooping capability can handle conflicting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) * memory type across CPUs by snooping its own cache. However, there exists
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) * CPU models in which having conflicting memory types still leads to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * unpredictable behavior, machine check errors, or hangs. Clear this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) * feature to prevent its use on machines with known erratas.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) switch (c->x86_model) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) case INTEL_FAM6_CORE_YONAH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) case INTEL_FAM6_CORE2_MEROM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) case INTEL_FAM6_CORE2_MEROM_L:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) case INTEL_FAM6_CORE2_PENRYN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) case INTEL_FAM6_CORE2_DUNNINGTON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) case INTEL_FAM6_NEHALEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) case INTEL_FAM6_NEHALEM_G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) case INTEL_FAM6_NEHALEM_EP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) case INTEL_FAM6_NEHALEM_EX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) case INTEL_FAM6_WESTMERE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) case INTEL_FAM6_WESTMERE_EP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) case INTEL_FAM6_SANDYBRIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) static bool ring3mwait_disabled __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) static int __init ring3mwait_disable(char *__unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) ring3mwait_disabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) __setup("ring3mwait=disable", ring3mwait_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) * Ring 3 MONITOR/MWAIT feature cannot be detected without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) * cpu model and family comparison.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) if (c->x86 != 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) switch (c->x86_model) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) case INTEL_FAM6_XEON_PHI_KNL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) case INTEL_FAM6_XEON_PHI_KNM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) if (ring3mwait_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) this_cpu_or(msr_misc_features_shadow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) if (c == &boot_cpu_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) * - https://kb.vmware.com/s/article/52345
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) * - Microcode revisions observed in the wild
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) * - Release note from 20180108 microcode release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) */
struct sku_microcode {
	u8 model;	/* CPUID family-6 model (INTEL_FAM6_*) */
	u8 stepping;	/* CPUID stepping */
	u32 microcode;	/* highest microcode revision known to be broken */
};
/* SKUs whose IBRS/IBPB/STIBP microcode is broken up to the listed revision. */
static const struct sku_microcode spectre_bad_microcodes[] = {
	{ INTEL_FAM6_KABYLAKE,		0x0B,	0x80 },
	{ INTEL_FAM6_KABYLAKE,		0x0A,	0x80 },
	{ INTEL_FAM6_KABYLAKE,		0x09,	0x80 },
	{ INTEL_FAM6_KABYLAKE_L,	0x0A,	0x80 },
	{ INTEL_FAM6_KABYLAKE_L,	0x09,	0x80 },
	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
	{ INTEL_FAM6_BROADWELL,		0x04,	0x28 },
	{ INTEL_FAM6_BROADWELL_G,	0x01,	0x1b },
	{ INTEL_FAM6_BROADWELL_D,	0x02,	0x14 },
	{ INTEL_FAM6_BROADWELL_D,	0x03,	0x07000011 },
	{ INTEL_FAM6_BROADWELL_X,	0x01,	0x0b000025 },
	{ INTEL_FAM6_HASWELL_L,		0x01,	0x21 },
	{ INTEL_FAM6_HASWELL_G,		0x01,	0x18 },
	{ INTEL_FAM6_HASWELL,		0x03,	0x23 },
	{ INTEL_FAM6_HASWELL_X,		0x02,	0x3b },
	{ INTEL_FAM6_HASWELL_X,		0x04,	0x10 },
	{ INTEL_FAM6_IVYBRIDGE_X,	0x04,	0x42a },
	/* Observed in the wild */
	{ INTEL_FAM6_SANDYBRIDGE_X,	0x06,	0x61b },
	{ INTEL_FAM6_SANDYBRIDGE_X,	0x07,	0x712 },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) * We know that the hypervisor lie to us on the microcode version so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) * we may as well hope that it is running the correct version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) if (cpu_has(c, X86_FEATURE_HYPERVISOR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) if (c->x86 != 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) if (c->x86_model == spectre_bad_microcodes[i].model &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) c->x86_stepping == spectre_bad_microcodes[i].stepping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) return (c->microcode <= spectre_bad_microcodes[i].microcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
/*
 * Early Intel CPU setup, run before generic identification: unmasks CPUID,
 * sets TSC-related feature bits, and applies errata workarounds that must
 * be in effect before the rest of CPU bring-up.
 */
static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
			/* The limit bit was set and is now clear: re-read the leaves */
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	/* P4 model >= 3 and family-6 model >= 0x0e have a constant-rate TSC */
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
		(c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	/* Cache the microcode revision; the errata checks below rely on it */
	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
		c->microcode = intel_get_microcode_revision();

	/* Now if any of them are set, check the blacklist and clear the lot */
	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
	     cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
	     cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
		setup_clear_cpu_cap(X86_FEATURE_IBRS);
		setup_clear_cpu_cap(X86_FEATURE_IBPB);
		setup_clear_cpu_cap(X86_FEATURE_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SSBD);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page. This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case INTEL_FAM6_ATOM_SALTWELL_MID:
		case INTEL_FAM6_ATOM_SALTWELL_TABLET:
		case INTEL_FAM6_ATOM_SILVERMONT_MID:
		case INTEL_FAM6_ATOM_AIRMONT_NP:
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 * may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 * the TLB when any changes are made to any of the page table entries.
	 * The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86 == 5 && c->x86_model == 9) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}

	if (c->cpuid_level >= 0x00000001) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
		/*
		 * If HTT (EDX[28]) is set EBX[16:23] contain the number of
		 * apicids which are reserved per package. Store the resulting
		 * shift value for the package management code.
		 */
		if (edx & (1U << 28))
			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
	}

	check_memory_type_self_snoop_errata(c);

	/*
	 * Get the number of SMT siblings early from the extended topology
	 * leaf, if available. Otherwise try the legacy SMT detection.
	 */
	if (detect_extended_topology_early(c) < 0)
		detect_ht_early(c);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
/* Boot-CPU-only init: delegate resource-control (resctrl) capability probing. */
static void bsp_init_intel(struct cpuinfo_x86 *c)
{
	resctrl_cpu_detect(c);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) * Early probe support logic for ppro memory erratum #50
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) * This is called before we do cpu ident work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) int ppro_with_ram_bug(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) /* Uses data from early_cpu_detect now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) boot_cpu_data.x86 == 6 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) boot_cpu_data.x86_model == 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) boot_cpu_data.x86_stepping < 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) static void intel_smp_check(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) /* calling is from identify_secondary_cpu() ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) if (!c->cpu_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) * Mask B, Pentium, but not Pentium MMX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) if (c->x86 == 5 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) c->x86_model <= 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) * Remember we have B step Pentia with bugs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) WARN_ONCE(1, "WARNING: SMP operation may be unreliable"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) "with B stepping processors.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
/* Set by the "forcepae" command line parameter; consumed in intel_workarounds(). */
static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;	/* parameter consumed, don't pass it on to init */
}
__setup("forcepae", forcepae_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) static void intel_workarounds(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) #ifdef CONFIG_X86_F00F_BUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) * All models of Pentium and Pentium with MMX technology CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) * have the F0 0F bug, which lets nonprivileged users lock up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) * system. Announce that the fault handler will be checking for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) * The Quark is also family 5, but does not have the same bug.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) clear_cpu_bug(c, X86_BUG_F00F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) if (c->x86 == 5 && c->x86_model < 9) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) static int f00f_workaround_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) set_cpu_bug(c, X86_BUG_F00F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) if (!f00f_workaround_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) f00f_workaround_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) * model 3 mask 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) clear_cpu_cap(c, X86_FEATURE_SEP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) * PAE CPUID issue: many Pentium M report no PAE but may have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) * functionally usable PAE implementation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) * Forcefully enable PAE if kernel parameter "forcepae" is present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) if (forcepae) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) pr_warn("PAE forced!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) set_cpu_cap(c, X86_FEATURE_PAE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) * P4 Xeon erratum 037 workaround.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) * Hardware prefetcher may cause stale data to be loaded into the cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) if (msr_set_bit(MSR_IA32_MISC_ENABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) pr_info("CPU: C0 stepping P4 Xeon detected.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) * See if we have a good local APIC by checking for buggy Pentia,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) * i.e. all B steppings and the C2 stepping of P54C when using their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) * integrated APIC (see 11AP erratum in "Pentium Processor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) * Specification Update").
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) set_cpu_bug(c, X86_BUG_11AP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) #ifdef CONFIG_X86_INTEL_USERCOPY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) * Set up the preferred alignment for movsl bulk memory moves
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) switch (c->x86) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) case 4: /* 486: untested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) case 5: /* Old Pentia: untested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) case 6: /* PII/PIII only like movsl with 8-byte alignment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) movsl_mask.mask = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) case 15: /* P4 is OK down to 8-byte alignment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) movsl_mask.mask = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) intel_smp_check(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) static void intel_workarounds(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) static void srat_detect_node(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) #ifdef CONFIG_NUMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) unsigned node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) /* Don't do the funky fallback heuristics the AMD version employs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) for now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) node = numa_cpu_node(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) if (node == NUMA_NO_NODE || !node_online(node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) /* reuse the value from init_cpu_to_node() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) node = cpu_to_node(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) numa_set_node(cpu, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) #define MSR_IA32_TME_ACTIVATE 0x982
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) /* Helpers to access TME_ACTIVATE MSR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) #define TME_ACTIVATE_LOCKED(x) (x & 0x1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) #define TME_ACTIVATE_ENABLED(x) (x & 0x2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) #define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) #define TME_ACTIVATE_POLICY_AES_XTS_128 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) #define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) #define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) #define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) /* Values for mktme_status (SW only construct) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) #define MKTME_ENABLED 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) #define MKTME_DISABLED 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) #define MKTME_UNINITIALIZED 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) static int mktme_status = MKTME_UNINITIALIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) static void detect_tme(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) u64 tme_activate, tme_policy, tme_crypto_algs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) int keyid_bits = 0, nr_keyids = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) static u64 tme_activate_cpu0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) if (mktme_status != MKTME_UNINITIALIZED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) if (tme_activate != tme_activate_cpu0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) /* Broken BIOS? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) pr_err_once("x86/tme: MKTME is not usable\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) mktme_status = MKTME_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) /* Proceed. We may need to exclude bits from x86_phys_bits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) tme_activate_cpu0 = tme_activate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) pr_info_once("x86/tme: not enabled by BIOS\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) mktme_status = MKTME_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) if (mktme_status != MKTME_UNINITIALIZED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) goto detect_keyid_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) pr_info("x86/tme: enabled by BIOS\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) tme_policy = TME_ACTIVATE_POLICY(tme_activate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) tme_crypto_algs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) mktme_status = MKTME_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) detect_keyid_bits:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) nr_keyids = (1UL << keyid_bits) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) if (nr_keyids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) pr_info_once("x86/mktme: enabled by BIOS\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) pr_info_once("x86/mktme: disabled by BIOS\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) if (mktme_status == MKTME_UNINITIALIZED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) /* MKTME is usable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) mktme_status = MKTME_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) * KeyID bits effectively lower the number of physical address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) c->x86_phys_bits -= keyid_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) static void init_cpuid_fault(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) u64 msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) static void init_intel_misc_features(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) u64 msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) /* Clear all MISC features */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) this_cpu_write(msr_misc_features_shadow, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) /* Check features and update capabilities and shadow control bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) init_cpuid_fault(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) probe_xeon_phi_r3mwait(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) msr = this_cpu_read(msr_misc_features_shadow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) static void split_lock_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) static void init_intel(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) early_init_intel(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) intel_workarounds(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) * Detect the extended topology information if available. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) * will reinitialise the initial_apicid which will be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) * in init_intel_cacheinfo()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) detect_extended_topology(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) * let's use the legacy cpuid vector 0x1 and 0x4 for topology
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) * detection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) detect_num_cpu_cores(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) detect_ht(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) init_intel_cacheinfo(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) if (c->cpuid_level > 9) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) unsigned eax = cpuid_eax(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) /* Check for version and the number of counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) if (cpu_has(c, X86_FEATURE_XMM2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) if (boot_cpu_has(X86_FEATURE_DS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) unsigned int l1, l2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) if (!(l1 & (1<<11)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) set_cpu_cap(c, X86_FEATURE_BTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) if (!(l1 & (1<<12)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) set_cpu_cap(c, X86_FEATURE_PEBS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) ((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) set_cpu_bug(c, X86_BUG_MONITOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) if (c->x86 == 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) c->x86_cache_alignment = c->x86_clflush_size * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) if (c->x86 == 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) set_cpu_cap(c, X86_FEATURE_REP_GOOD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) * Names for the Pentium II/Celeron processors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) * detectable only by also checking the cache size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) * Dixon is NOT a Celeron.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) if (c->x86 == 6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) unsigned int l2 = c->x86_cache_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) char *p = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) switch (c->x86_model) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) if (l2 == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) p = "Celeron (Covington)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) else if (l2 == 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) p = "Mobile Pentium II (Dixon)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) if (l2 == 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) p = "Celeron (Mendocino)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) else if (c->x86_stepping == 0 || c->x86_stepping == 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) p = "Celeron-A";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) if (l2 == 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) p = "Celeron (Coppermine)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) if (p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) strcpy(c->x86_model_id, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) if (c->x86 == 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) set_cpu_cap(c, X86_FEATURE_P4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) if (c->x86 == 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) set_cpu_cap(c, X86_FEATURE_P3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) /* Work around errata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) srat_detect_node(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) init_ia32_feat_ctl(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) if (cpu_has(c, X86_FEATURE_TME))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) detect_tme(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) init_intel_misc_features(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) if (tsx_ctrl_state == TSX_CTRL_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) tsx_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) if (tsx_ctrl_state == TSX_CTRL_DISABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) tsx_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) split_lock_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * Intel PIII Tualatin. This comes in two flavours.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) * One has 256kb of cache, the other 512. We have no way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) * to determine which, so we use a boottime override
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * for the 512kb model, and assume 256 otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) size = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * Intel Quark SoC X1000 contains a 4-way set associative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) * 16K cache with a 16 byte cache line and 256 lines per tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) if ((c->x86 == 5) && (c->x86_model == 9))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) size = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) #define TLB_INST_4K 0x01
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) #define TLB_INST_4M 0x02
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) #define TLB_INST_2M_4M 0x03
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) #define TLB_INST_ALL 0x05
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) #define TLB_INST_1G 0x06
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) #define TLB_DATA_4K 0x11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) #define TLB_DATA_4M 0x12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) #define TLB_DATA_2M_4M 0x13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) #define TLB_DATA_4K_4M 0x14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) #define TLB_DATA_1G 0x16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) #define TLB_DATA0_4K 0x21
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) #define TLB_DATA0_4M 0x22
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) #define TLB_DATA0_2M_4M 0x23
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) #define STLB_4K 0x41
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) #define STLB_4K_2M 0x42
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) static const struct _tlb_table intel_tlb_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) { 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) { 0x04, TLB_DATA_4M, 8, " TLB_DATA 4 MByte pages, 4-way set associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) { 0x05, TLB_DATA_4M, 32, " TLB_DATA 4 MByte pages, 4-way set associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) { 0x0b, TLB_INST_4M, 4, " TLB_INST 4 MByte pages, 4-way set associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) { 0x4f, TLB_INST_4K, 32, " TLB_INST 4 KByte pages" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) { 0x50, TLB_INST_ALL, 64, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) { 0x51, TLB_INST_ALL, 128, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) { 0x52, TLB_INST_ALL, 256, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) { 0x55, TLB_INST_2M_4M, 7, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) { 0x56, TLB_DATA0_4M, 16, " TLB_DATA0 4 MByte pages, 4-way set associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) { 0x57, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, 4-way associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) { 0x59, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, fully associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) { 0x5a, TLB_DATA0_2M_4M, 32, " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) { 0x5b, TLB_DATA_4K_4M, 64, " TLB_DATA 4 KByte and 4 MByte pages" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) { 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) { 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) { 0xc2, TLB_DATA_2M_4M, 16, " TLB_DATA 2 MByte/4MByte pages, 4-way associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) { 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) { 0x00, 0, 0 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) static void intel_tlb_lookup(const unsigned char desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) unsigned char k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (desc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) /* look up this descriptor in the table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) for (k = 0; intel_tlb_table[k].descriptor != desc &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) intel_tlb_table[k].descriptor != 0; k++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (intel_tlb_table[k].tlb_type == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) switch (intel_tlb_table[k].tlb_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) case STLB_4K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) case STLB_4K_2M:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) case TLB_INST_ALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) case TLB_INST_4K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) case TLB_INST_4M:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) case TLB_INST_2M_4M:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) case TLB_DATA_4K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) case TLB_DATA0_4K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) case TLB_DATA_4M:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) case TLB_DATA0_4M:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) case TLB_DATA_2M_4M:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) case TLB_DATA0_2M_4M:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) case TLB_DATA_4K_4M:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) case TLB_DATA_1G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) static void intel_detect_tlb(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) int i, j, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) unsigned int regs[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) unsigned char *desc = (unsigned char *)regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (c->cpuid_level < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /* Number of times to iterate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) n = cpuid_eax(2) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) for (i = 0 ; i < n ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) /* If bit 31 is set, this is an unknown format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) for (j = 0 ; j < 3 ; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (regs[j] & (1 << 31))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) regs[j] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) /* Byte 0 is level count, not a descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) for (j = 1 ; j < 16 ; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) intel_tlb_lookup(desc[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) static const struct cpu_dev intel_cpu_dev = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) .c_vendor = "Intel",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) .c_ident = { "GenuineIntel" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) .legacy_models = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) { .family = 4, .model_names =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) [0] = "486 DX-25/33",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) [1] = "486 DX-50",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) [2] = "486 SX",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) [3] = "486 DX/2",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) [4] = "486 SL",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) [5] = "486 SX/2",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) [7] = "486 DX/2-WB",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) [8] = "486 DX/4",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) [9] = "486 DX/4-WB"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) { .family = 5, .model_names =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) [0] = "Pentium 60/66 A-step",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) [1] = "Pentium 60/66",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) [2] = "Pentium 75 - 200",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) [3] = "OverDrive PODP5V83",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) [4] = "Pentium MMX",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) [7] = "Mobile Pentium 75 - 200",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) [8] = "Mobile Pentium MMX",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) [9] = "Quark SoC X1000",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) { .family = 6, .model_names =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) [0] = "Pentium Pro A-step",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) [1] = "Pentium Pro",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) [3] = "Pentium II (Klamath)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) [4] = "Pentium II (Deschutes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) [5] = "Pentium II (Deschutes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) [6] = "Mobile Pentium II",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) [7] = "Pentium III (Katmai)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) [8] = "Pentium III (Coppermine)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) [10] = "Pentium III (Cascades)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) [11] = "Pentium III (Tualatin)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) { .family = 15, .model_names =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) [0] = "Pentium 4 (Unknown)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) [1] = "Pentium 4 (Willamette)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) [2] = "Pentium 4 (Northwood)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) [4] = "Pentium 4 (Foster)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) [5] = "Pentium 4 (Foster)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) .legacy_cache_size = intel_size_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) .c_detect_tlb = intel_detect_tlb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) .c_early_init = early_init_intel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) .c_bsp_init = bsp_init_intel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) .c_init = init_intel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) .c_x86_vendor = X86_VENDOR_INTEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) cpu_dev_register(intel_cpu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) #undef pr_fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) #define pr_fmt(fmt) "x86/split lock detection: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) const char *option;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) enum split_lock_detect_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) } sld_options[] __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) { "off", sld_off },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) { "warn", sld_warn },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) { "fatal", sld_fatal },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) static inline bool match_option(const char *arg, int arglen, const char *opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) int len = strlen(opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) return len == arglen && !strncmp(arg, opt, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) static bool split_lock_verify_msr(bool on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) u64 ctrl, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) rdmsrl(MSR_TEST_CTRL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return ctrl == tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static void __init split_lock_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) enum split_lock_detect_state state = sld_warn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) char arg[20];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (!split_lock_verify_msr(false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) pr_info("MSR access failed: Disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) ret = cmdline_find_option(boot_command_line, "split_lock_detect",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) arg, sizeof(arg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (ret >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) for (i = 0; i < ARRAY_SIZE(sld_options); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) if (match_option(arg, ret, sld_options[i].option)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) state = sld_options[i].state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) case sld_off:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) pr_info("disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) case sld_warn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) pr_info("warning about user-space split_locks\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) case sld_fatal:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) pr_info("sending SIGBUS on user-space split_locks\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (!split_lock_verify_msr(true)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) pr_info("MSR access failed: Disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) sld_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * is not implemented as one thread could undo the setting of the other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * thread immediately after dropping the lock anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static void sld_update_msr(bool on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) u64 test_ctrl_val = msr_test_ctrl_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) static void split_lock_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (cpu_model_supports_sld)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) split_lock_verify_msr(sld_state != sld_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) static void split_lock_warn(unsigned long ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) current->comm, current->pid, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * Disable the split lock detection for this task so it can make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * progress and set TIF_SLD so the detection is re-enabled via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * switch_to_sld() when the task is scheduled out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) sld_update_msr(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) set_tsk_thread_flag(current, TIF_SLD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) bool handle_guest_split_lock(unsigned long ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (sld_state == sld_warn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) split_lock_warn(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) current->comm, current->pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) sld_state == sld_fatal ? "fatal" : "bogus", ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) current->thread.error_code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) current->thread.trap_nr = X86_TRAP_AC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) EXPORT_SYMBOL_GPL(handle_guest_split_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) bool handle_user_split_lock(struct pt_regs *regs, long error_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) split_lock_warn(regs->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * This function is called only when switching between tasks with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * different split-lock detection modes. It sets the MSR for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * mode of the new task. This is right most of the time, but since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * the MSR is shared by hyperthreads on a physical core there can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * be glitches when the two threads need different modes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) void switch_to_sld(unsigned long tifn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) sld_update_msr(!(tifn & _TIF_SLD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * Bits in the IA32_CORE_CAPABILITIES are not architectural, so they should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * only be trusted if it is confirmed that a CPU model implements a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * specific feature at a particular bit position.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * The possible driver data field values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * - 0: CPU models that are known to have the per-core split-lock detection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * feature even though they do not enumerate IA32_CORE_CAPABILITIES.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * - 1: CPU models which may enumerate IA32_CORE_CAPABILITIES and if so use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * bit 5 to enumerate the per-core split-lock detection feature.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) const struct x86_cpu_id *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) u64 ia32_core_caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) m = x86_match_cpu(split_lock_cpu_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (!m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) switch (m->driver_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (!(ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) cpu_model_supports_sld = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) split_lock_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }