// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
 * detailed at:
 *
 *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
 *
 * This code was originally written hastily under an awful lot of stress and so
 * aspects of it are somewhat hacky. Unfortunately, changing anything in here
 * instantly makes me feel ill. Thanks, Jann. Thann.
 *
 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
 * Copyright (C) 2020 Google LLC
 *
 * "If there's something strange in your neighbourhood, who you gonna call?"
 *
 * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
 */

#include <linux/arm-smccc.h>
#include <linux/bpf.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>

#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/spectre.h>
#include <asm/traps.h>
#include <asm/vectors.h>
#include <asm/virt.h>

/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
                                    enum mitigation_state new)
{
        enum mitigation_state state;

        do {
                state = READ_ONCE(*oldp);
                if (new <= state)
                        break;

                /* Userspace almost certainly can't deal with this. */
                if (WARN_ON(system_capabilities_finalized()))
                        break;
        } while (cmpxchg_relaxed(oldp, state, new) != state);
}
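
/*
 * A sketch of the ordering this relies on, assuming the usual definition of
 * enum mitigation_state in <asm/spectre.h>:
 *
 *	SPECTRE_UNAFFECTED < SPECTRE_MITIGATED < SPECTRE_VULNERABLE
 *
 * so update_mitigation_state() only ever moves a state towards "vulnerable":
 * a late-onlined CPU can make things worse, never better.
 */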

/*
 * Spectre v1.
 *
 * The kernel can't protect userspace for this one: it's each person for
 * themselves. Advertise what we're doing and be done with it.
 */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}
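
/*
 * The cpu_show_*() hooks in this file back the files under
 * /sys/devices/system/cpu/vulnerabilities/, so the strings they build are
 * exactly what userspace reads, e.g. (illustrative shell session):
 *
 *	$ cat /sys/devices/system/cpu/vulnerabilities/spectre_v1
 *	Mitigation: __user pointer sanitization
 */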

/*
 * Spectre v2.
 *
 * This one sucks. A CPU is either:
 *
 * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in software by firmware.
 * - Mitigated in software by a CPU-specific dance in the kernel and a
 *   firmware call at EL2.
 * - Vulnerable.
 *
 * It's not unlikely for different CPUs in a big.LITTLE system to fall into
 * different camps.
 */
static enum mitigation_state spectre_v2_state;

static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
        __nospectre_v2 = true;
        return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);

static bool spectre_v2_mitigations_off(void)
{
        bool ret = __nospectre_v2 || cpu_mitigations_off();

        if (ret)
                pr_info_once("spectre-v2 mitigation disabled by command line option\n");

        return ret;
}

static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
{
        switch (bhb_state) {
        case SPECTRE_UNAFFECTED:
                return "";
        default:
        case SPECTRE_VULNERABLE:
                return ", but not BHB";
        case SPECTRE_MITIGATED:
                return ", BHB";
        }
}
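
/*
 * Illustrative combinations of the strings assembled below: a mitigated CPU
 * that also has the BHB mitigation reports
 * "Mitigation: Branch predictor hardening, BHB", while one without it reports
 * "Mitigation: Branch predictor hardening, but not BHB".
 */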

static bool _unprivileged_ebpf_enabled(void)
{
#ifdef CONFIG_BPF_SYSCALL
        return !sysctl_unprivileged_bpf_disabled;
#else
        return false;
#endif
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
        const char *bhb_str = get_bhb_affected_string(bhb_state);
        const char *v2_str = "Branch predictor hardening";

        switch (spectre_v2_state) {
        case SPECTRE_UNAFFECTED:
                if (bhb_state == SPECTRE_UNAFFECTED)
                        return sprintf(buf, "Not affected\n");

                /*
                 * Platforms affected by Spectre-BHB can't report
                 * "Not affected" for Spectre-v2.
                 */
                v2_str = "CSV2";
                fallthrough;
        case SPECTRE_MITIGATED:
                if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
                        return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");

                return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
        case SPECTRE_VULNERABLE:
                fallthrough;
        default:
                return sprintf(buf, "Vulnerable\n");
        }
}

static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
        u64 pfr0;
        static const struct midr_range spectre_v2_safe_list[] = {
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
                MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
                MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
                MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
                MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
                MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
                { /* sentinel */ }
        };

        /* If the CPU has CSV2 set, we're safe */
        pfr0 = read_cpuid(ID_AA64PFR0_EL1);
        if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
                return SPECTRE_UNAFFECTED;

        /* Alternatively, we have a list of unaffected CPUs */
        if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
                return SPECTRE_UNAFFECTED;

        return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
{
        int ret;
        struct arm_smccc_res res;

        arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                             ARM_SMCCC_ARCH_WORKAROUND_1, &res);

        ret = res.a0;
        switch (ret) {
        case SMCCC_RET_SUCCESS:
                return SPECTRE_MITIGATED;
        case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
                return SPECTRE_UNAFFECTED;
        default:
                fallthrough;
        case SMCCC_RET_NOT_SUPPORTED:
                return SPECTRE_VULNERABLE;
        }
}

bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
                return false;

        if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
                return false;

        return true;
}

enum mitigation_state arm64_get_spectre_v2_state(void)
{
        return spectre_v2_state;
}

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
        __this_cpu_write(bp_hardening_data.fn, fn);

        /*
         * Vinz Clortho takes the hyp_vecs start/end "keys" at
         * the door when we're a guest. Skip the hyp-vectors work.
         */
        if (!is_hyp_mode_available())
                return;

        __this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
}

static void call_smc_arch_workaround_1(void)
{
        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void qcom_link_stack_sanitisation(void)
{
        u64 tmp;

        asm volatile("mov       %0, x30         \n"
                     ".rept     16              \n"
                     "bl        . + 4           \n"
                     ".endr                     \n"
                     "mov       x30, %0         \n"
                     : "=&r" (tmp));
}
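
/*
 * The asm above saves the link register, then executes sixteen "bl . + 4"
 * instructions - each a branch-and-link to the very next instruction - so
 * the CPU's return-address predictor stack is overwritten with benign
 * entries, and finally restores x30. Sixteen is presumably chosen to cover
 * the depth of the link stack on the affected Falkor parts.
 */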

static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
{
        u32 midr = read_cpuid_id();

        if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
            ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
                return NULL;

        return qcom_link_stack_sanitisation;
}

static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
        bp_hardening_cb_t cb;
        enum mitigation_state state;

        state = spectre_v2_get_cpu_fw_mitigation_state();
        if (state != SPECTRE_MITIGATED)
                return state;

        if (spectre_v2_mitigations_off())
                return SPECTRE_VULNERABLE;

        switch (arm_smccc_1_1_get_conduit()) {
        case SMCCC_CONDUIT_HVC:
                cb = call_hvc_arch_workaround_1;
                break;

        case SMCCC_CONDUIT_SMC:
                cb = call_smc_arch_workaround_1;
                break;

        default:
                return SPECTRE_VULNERABLE;
        }

        /*
         * Prefer a CPU-specific workaround if it exists. Note that we
         * still rely on firmware for the mitigation at EL2.
         */
        cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
        install_bp_hardening_cb(cb);
        return SPECTRE_MITIGATED;
}

void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
        enum mitigation_state state;

        WARN_ON(preemptible());

        state = spectre_v2_get_cpu_hw_mitigation_state();
        if (state == SPECTRE_VULNERABLE)
                state = spectre_v2_enable_fw_mitigation();

        update_mitigation_state(&spectre_v2_state, state);
}

/*
 * Spectre-v3a.
 *
 * Phew, there's not an awful lot to do here! We just instruct EL2 to use
 * an indirect trampoline for the hyp vectors so that guests can't read
 * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
 */
bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
{
        static const struct midr_range spectre_v3a_unsafe_list[] = {
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
                {},
        };

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
}

void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
        struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

        if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
                data->slot += HYP_VECTOR_INDIRECT;
}

/*
 * Spectre v4.
 *
 * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
 * either:
 *
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in hardware via PSTATE.SSBS.
 * - Mitigated in software by firmware (sometimes referred to as SSBD).
 *
 * Wait, that doesn't sound so bad, does it? Keep reading...
 *
 * A major source of headaches is that the software mitigation is enabled on a
 * per-task basis, but can also be forced on for the kernel, necessitating both
 * context-switch *and* entry/exit hooks. To make it even worse, some CPUs
 * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
 * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
 * so you can have systems that have both firmware and SSBS mitigations. This
 * means we actually have to reject late onlining of CPUs with mitigations if
 * all of the currently onlined CPUs are safelisted, as the mitigation tends to
 * be opt-in for userspace. Yes, really, the cure is worse than the disease.
 *
 * The only good part is that if the firmware mitigation is present, then it is
 * present for all CPUs, meaning we don't have to worry about late onlining of a
 * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
 *
 * Give me a VAX-11/780 any day of the week...
 */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

enum spectre_v4_policy {
        SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
        SPECTRE_V4_POLICY_MITIGATION_ENABLED,
        SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

static const struct spectre_v4_param {
        const char              *str;
        enum spectre_v4_policy  policy;
} spectre_v4_params[] = {
        { "force-on",   SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
        { "force-off",  SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
        { "kernel",     SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
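
/*
 * Example usage, given the early_param() below: booting with "ssbd=force-on"
 * unconditionally enables the mitigation, "ssbd=force-off" disables it, and
 * "ssbd=kernel" enables it for the kernel while leaving tasks to opt in via
 * the prctl() interface further down.
 */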
static int __init parse_spectre_v4_param(char *str)
{
        int i;

        if (!str || !str[0])
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
                const struct spectre_v4_param *param = &spectre_v4_params[i];

                if (strncmp(str, param->str, strlen(param->str)))
                        continue;

                __spectre_v4_policy = param->policy;
                return 0;
        }

        return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);

/*
 * Because this was all written in a rush by people working in different silos,
 * we've ended up with multiple command line options to control the same thing.
 * Wrap these up in some helpers, which prefer disabling the mitigation if faced
 * with contradictory parameters. The mitigation is always either "off",
 * "dynamic" or "on".
 */
static bool spectre_v4_mitigations_off(void)
{
        bool ret = cpu_mitigations_off() ||
                   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;

        if (ret)
                pr_info_once("spectre-v4 mitigation disabled by command-line option\n");

        return ret;
}

/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
        return !spectre_v4_mitigations_off() &&
               __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

static bool spectre_v4_mitigations_on(void)
{
        return !spectre_v4_mitigations_off() &&
               __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}
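
/*
 * The three helpers above partition the policy space: exactly one of
 * spectre_v4_mitigations_{off,dynamic,on}() is true at any time, with
 * "mitigations=off" (via cpu_mitigations_off()) or "ssbd=force-off" on the
 * command line taking precedence over everything else.
 */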

ssize_t cpu_show_spec_store_bypass(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        switch (spectre_v4_state) {
        case SPECTRE_UNAFFECTED:
                return sprintf(buf, "Not affected\n");
        case SPECTRE_MITIGATED:
                return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
        case SPECTRE_VULNERABLE:
                fallthrough;
        default:
                return sprintf(buf, "Vulnerable\n");
        }
}

enum mitigation_state arm64_get_spectre_v4_state(void)
{
        return spectre_v4_state;
}

static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
        static const struct midr_range spectre_v4_safe_list[] = {
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
                MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
                MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
                MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
                { /* sentinel */ },
        };

        if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
                return SPECTRE_UNAFFECTED;

        /* CPU features are detected first */
        if (this_cpu_has_cap(ARM64_SSBS))
                return SPECTRE_MITIGATED;

        return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
        int ret;
        struct arm_smccc_res res;

        arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                             ARM_SMCCC_ARCH_WORKAROUND_2, &res);

        ret = res.a0;
        switch (ret) {
        case SMCCC_RET_SUCCESS:
                return SPECTRE_MITIGATED;
        case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
                fallthrough;
        case SMCCC_RET_NOT_REQUIRED:
                return SPECTRE_UNAFFECTED;
        default:
                fallthrough;
        case SMCCC_RET_NOT_SUPPORTED:
                return SPECTRE_VULNERABLE;
        }
}

bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
        enum mitigation_state state;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        state = spectre_v4_get_cpu_hw_mitigation_state();
        if (state == SPECTRE_VULNERABLE)
                state = spectre_v4_get_cpu_fw_mitigation_state();

        return state != SPECTRE_UNAFFECTED;
}

static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
        if (user_mode(regs))
                return 1;

        if (instr & BIT(PSTATE_Imm_shift))
                regs->pstate |= PSR_SSBS_BIT;
        else
                regs->pstate &= ~PSR_SSBS_BIT;

        arm64_skip_faulting_instruction(regs, 4);
        return 0;
}

static struct undef_hook ssbs_emulation_hook = {
        .instr_mask     = ~(1U << PSTATE_Imm_shift),
        .instr_val      = 0xd500401f | PSTATE_SSBS,
        .fn             = ssbs_emulation_handler,
};
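
/*
 * The mask/value pair above matches the "MSR SSBS, #imm" encoding with the
 * immediate bit wildcarded, so the hook fires for both "msr ssbs, #0" and
 * "msr ssbs, #1"; the handler then inspects the bit at PSTATE_Imm_shift to
 * decide which way to flip the saved PSTATE.SSBS bit.
 */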

static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
        static bool undef_hook_registered = false;
        static DEFINE_RAW_SPINLOCK(hook_lock);
        enum mitigation_state state;

        /*
         * If the system is mitigated but this CPU doesn't have SSBS, then
         * we must be on the safelist and there's nothing more to do.
         */
        state = spectre_v4_get_cpu_hw_mitigation_state();
        if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
                return state;

        raw_spin_lock(&hook_lock);
        if (!undef_hook_registered) {
                register_undef_hook(&ssbs_emulation_hook);
                undef_hook_registered = true;
        }
        raw_spin_unlock(&hook_lock);

        if (spectre_v4_mitigations_off()) {
                sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
                set_pstate_ssbs(1);
                return SPECTRE_VULNERABLE;
        }

        /* SCTLR_EL1.DSSBS was initialised to 0 during boot */
        set_pstate_ssbs(0);
        return SPECTRE_MITIGATED;
}

/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fall through and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
                                                  __le32 *origptr,
                                                  __le32 *updptr, int nr_inst)
{
        BUG_ON(nr_inst != 1); /* Branch -> NOP */

        if (spectre_v4_mitigations_off())
                return;

        if (cpus_have_final_cap(ARM64_SSBS))
                return;

        if (spectre_v4_mitigations_dynamic())
                *updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
                                              __le32 *origptr,
                                              __le32 *updptr, int nr_inst)
{
        u32 insn;

        BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

        switch (arm_smccc_1_1_get_conduit()) {
        case SMCCC_CONDUIT_HVC:
                insn = aarch64_insn_get_hvc_value();
                break;
        case SMCCC_CONDUIT_SMC:
                insn = aarch64_insn_get_smc_value();
                break;
        default:
                return;
        }

        *updptr = cpu_to_le32(insn);
}

static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
        enum mitigation_state state;

        state = spectre_v4_get_cpu_fw_mitigation_state();
        if (state != SPECTRE_MITIGATED)
                return state;

        if (spectre_v4_mitigations_off()) {
                arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
                return SPECTRE_VULNERABLE;
        }

        arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

        if (spectre_v4_mitigations_dynamic())
                __this_cpu_write(arm64_ssbd_callback_required, 1);

        return SPECTRE_MITIGATED;
}

void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
        enum mitigation_state state;

        WARN_ON(preemptible());

        state = spectre_v4_enable_hw_mitigation();
        if (state == SPECTRE_VULNERABLE)
                state = spectre_v4_enable_fw_mitigation();

        update_mitigation_state(&spectre_v4_state, state);
}

static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
        u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

        if (state)
                regs->pstate |= bit;
        else
                regs->pstate &= ~bit;
}

void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
        struct pt_regs *regs = task_pt_regs(tsk);
        bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

        if (spectre_v4_mitigations_off())
                ssbs = true;
        else if (spectre_v4_mitigations_dynamic() && !kthread)
                ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

        __update_pstate_ssbs(regs, ssbs);
}
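
/*
 * Summarising the logic above (SSBS set => speculation enabled, i.e. the
 * mitigation is off for that task):
 *
 *	mitigations off		-> SSBS set for everyone
 *	dynamic, user task	-> SSBS follows the task's TIF_SSBD flag
 *	dynamic, kthread	-> SSBS clear (kernel threads stay mitigated)
 *	mitigations on		-> SSBS clear for everyone
 */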

/*
 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
 * This is interesting because the "speculation disabled" behaviour can be
 * configured so that it is preserved across exec(), which means that the
 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
 * from userspace.
 */
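
/*
 * A minimal userspace sketch of that interface, using the standard
 * PR_{SET,GET}_SPECULATION_CTRL prctls (error handling omitted):
 *
 *	#include <sys/prctl.h>
 *
 *	// Disable speculative store bypass for this task (enable mitigation).
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 *	// Query the state; returns PR_SPEC_PRCTL | PR_SPEC_* on this path.
 *	long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *			   0, 0, 0);
 */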
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) static void ssbd_prctl_enable_mitigation(struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) task_clear_spec_ssb_noexec(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) task_set_spec_ssb_disable(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) set_tsk_thread_flag(task, TIF_SSBD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) static void ssbd_prctl_disable_mitigation(struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) task_clear_spec_ssb_noexec(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) task_clear_spec_ssb_disable(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) clear_tsk_thread_flag(task, TIF_SSBD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) switch (ctrl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) case PR_SPEC_ENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) /* Enable speculation: disable mitigation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) * Force disabled speculation prevents it from being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) * re-enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) if (task_spec_ssb_force_disable(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) * If the mitigation is forced on, then speculation is forced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) * off and we again prevent it from being re-enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) if (spectre_v4_mitigations_on())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) ssbd_prctl_disable_mitigation(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) case PR_SPEC_FORCE_DISABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) /* Force disable speculation: force enable mitigation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) * If the mitigation is forced off, then speculation is forced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * on and we prevent it from being disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) if (spectre_v4_mitigations_off())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) task_set_spec_ssb_force_disable(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) case PR_SPEC_DISABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) /* Disable speculation: enable mitigation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) /* Same as PR_SPEC_FORCE_DISABLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) if (spectre_v4_mitigations_off())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) ssbd_prctl_enable_mitigation(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) case PR_SPEC_DISABLE_NOEXEC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) /* Disable speculation until execve(): enable mitigation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) * If the mitigation state is forced one way or the other, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) * we must fail now before we try to toggle it on execve().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) if (task_spec_ssb_force_disable(task) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) spectre_v4_mitigations_off() ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) spectre_v4_mitigations_on()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) ssbd_prctl_enable_mitigation(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) task_set_spec_ssb_noexec(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) spectre_v4_enable_task_mitigation(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) unsigned long ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) switch (which) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) case PR_SPEC_STORE_BYPASS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) return ssbd_prctl_set(task, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) }

static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}

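/*
 * ssbd_prctl_get() composes its return value: e.g. PR_SPEC_PRCTL |
 * PR_SPEC_DISABLE means "the mitigation is currently enabled for this task
 * and can still be changed via prctl()". An illustrative caller would test
 * the PR_SPEC_PRCTL bit first:
 *
 *	int ret = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *			0, 0, 0);
 *	bool controllable = ret > 0 && (ret & PR_SPEC_PRCTL);
 */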
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_get(task);
	default:
		return -ENODEV;
	}
}

/*
 * Spectre BHB.
 *
 * A CPU is either:
 * - Mitigated by running a branchy loop a CPU-specific number of times;
 *   such CPUs are listed in our "loop mitigated list".
 * - Mitigated in software by the firmware Spectre v2 call.
 * - Mitigated by the ClearBHB instruction.
 * - Mitigated by the 'Exception Clears Branch History Buffer' (ECBHB)
 *   feature, in which case no software mitigation in the vectors is needed.
 * - Unaffected, because it implements CSV2.3.
 */
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}

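/*
 * One bit below per flavour of mitigation in use somewhere in the system.
 * Bits are only ever set (never cleared) as CPUs come online; the
 * alternatives callbacks at the bottom of this file consume them to decide
 * how the vector entries are patched.
 */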
enum bhb_mitigation_bits {
	BHB_LOOP,
	BHB_FW,
	BHB_HW,
	BHB_INSN,
};
static unsigned long system_bhb_mitigations;

/*
 * This must be called with SCOPE_LOCAL_CPU on each type of CPU before any
 * SCOPE_SYSTEM call will give the right answer: each SCOPE_LOCAL_CPU call
 * folds the local CPU's loop count into the running system-wide maximum,
 * which is what SCOPE_SYSTEM then reports.
 */
u8 spectre_bhb_loop_affected(int scope)
{
	u8 k = 0;
	static u8 max_bhb_k;

	if (scope == SCOPE_LOCAL_CPU) {
		static const struct midr_range spectre_bhb_k32_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
			{},
		};
		static const struct midr_range spectre_bhb_k24_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
			{},
		};
		static const struct midr_range spectre_bhb_k8_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
			{},
		};

		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
			k = 32;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
			k = 24;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
			k = 8;

		max_bhb_k = max(max_bhb_k, k);
	} else {
		k = max_bhb_k;
	}

	return k;
}

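/*
 * Ask firmware (via the SMCCC_ARCH_FEATURES query) whether it implements
 * ARM_SMCCC_ARCH_WORKAROUND_3, the call that invalidates the branch
 * history: SUCCESS means the call exists and is needed, RET_UNAFFECTED
 * means this CPU needs no workaround, and anything else means no firmware
 * help is available.
 */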
static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

static bool is_spectre_bhb_fw_affected(int scope)
{
	static bool system_affected;
	enum mitigation_state fw_state;
	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		{},
	};
	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
					spectre_bhb_firmware_mitigated_list);

	if (scope != SCOPE_LOCAL_CPU)
		return system_affected;

	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
		system_affected = true;
		return true;
	}

	return false;
}

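/*
 * A non-zero ID_AA64MMFR1_EL1.ECBHB field indicates that taking an
 * exception already leaves the branch history unusable by an attacker, so
 * no software sequence is needed in the vectors.
 */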
static bool supports_ecbhb(int scope)
{
	u64 mmfr1;

	if (scope == SCOPE_LOCAL_CPU)
		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
	else
		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_ECBHB_SHIFT);
}

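/*
 * The checks below mirror the list at the top of this section: CSV2.3
 * CPUs are unaffected, while ClearBHB, loop-list and firmware-list CPUs
 * all report as affected so that spectre_bhb_enable_mitigation() can pick
 * the cheapest fix they support.
 */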
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
			     int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (supports_csv2p3(scope))
		return false;

	if (supports_clearbhb(scope))
		return true;

	if (spectre_bhb_loop_affected(scope))
		return true;

	if (is_spectre_bhb_fw_affected(scope))
		return true;

	return false;
}

static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
	const char *v = arm64_get_bp_hardening_vector(slot);

	if (slot < 0)
		return;

	__this_cpu_write(this_cpu_vector, v);

	/*
	 * When KPTI is in use, the vectors are switched when exiting to
	 * user-space.
	 */
	if (arm64_kernel_unmapped_at_el0())
		return;

	write_sysreg(v, vbar_el1);
	isb();
}

void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cpu_cb;
	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
		return;

	if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
		/* No point mitigating Spectre-BHB alone. */
	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
	} else if (cpu_mitigations_off()) {
		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
		state = SPECTRE_MITIGATED;
		set_bit(BHB_HW, &system_bhb_mitigations);
	} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have ClearBHB
		 * added.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_INSN, &system_bhb_mitigations);
	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have the
		 * branchy-loop added. A57/A72-r0 will already have selected
		 * the spectre-indirect vector, which is sufficient for BHB
		 * too.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_LOOP, &system_bhb_mitigations);
	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
		if (fw_state == SPECTRE_MITIGATED) {
			/*
			 * Ensure KVM uses one of the spectre bp_hardening
			 * vectors. The indirect vector doesn't include the EL3
			 * call, so needs upgrading to
			 * HYP_VECTOR_SPECTRE_INDIRECT.
			 */
			if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
				data->slot += 1;

			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

			/*
			 * The WA3 call in the vectors supersedes the WA1 call
			 * made during context-switch. Uninstall any firmware
			 * bp_hardening callback.
			 */
			cpu_cb = spectre_v2_get_sw_mitigation_cb();
			if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
				__this_cpu_write(bp_hardening_data.fn, NULL);

			state = SPECTRE_MITIGATED;
			set_bit(BHB_FW, &system_bhb_mitigations);
		}
	}

	update_mitigation_state(&spectre_bhb_state, state);
}

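/*
 * The callbacks below are invoked by the alternatives framework at boot to
 * rewrite single instructions in the vector entries. As a rough sketch
 * (label name illustrative), the entry code's default instruction is a
 * branch over the mitigation sequence:
 *
 *	alternative_cb	spectre_bhb_patch_loop_mitigation_enable
 *	b	skip_loop	// patched to NOP when the loop is in use
 *	alternative_cb_end
 */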
/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
						      __le32 *origptr,
						      __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_LOOP, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
						     __le32 *origptr,
						     __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_FW, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to set the correct loop-iteration count immediate */
void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
					 __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;
	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);
}

/* Patched to mov WA3 when supported */
void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
				   __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
	    !test_bit(BHB_FW, &system_bhb_mitigations))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);

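	/*
	 * ORR wN, wzr, #imm is effectively a MOV. It is used here because
	 * ARM_SMCCC_ARCH_WORKAROUND_3 (0x80003fff) happens to be encodable
	 * as an AArch64 logical immediate, whereas a MOVZ/MOVK pair would
	 * need two instructions and only one may be patched.
	 */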
	insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
						  AARCH64_INSN_VARIANT_32BIT,
						  AARCH64_INSN_REG_ZR, rd,
						  ARM_SMCCC_ARCH_WORKAROUND_3);
	if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
		return;

	*updptr++ = cpu_to_le32(insn);
}

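/*
 * The two instructions at this callsite are expected to be the ClearBHB
 * instruction followed by an ISB; when ClearBHB is not the chosen
 * mitigation, both are turned into NOPs.
 */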
/* Patched to NOP when not supported */
void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 2);

	if (test_bit(BHB_INSN, &system_bhb_mitigations))
		return;

	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

#ifdef CONFIG_BPF_SYSCALL
#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
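/*
 * Called when the unprivileged_bpf_disabled sysctl is written; new_state
 * is the value being written, so !new_state means unprivileged eBPF is
 * being switched on. Only warn when the system is otherwise mitigated,
 * since the warning adds nothing on an already-vulnerable system.
 */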
void unpriv_ebpf_notify(int new_state)
{
	if (spectre_v2_state == SPECTRE_VULNERABLE ||
	    spectre_bhb_state != SPECTRE_MITIGATED)
		return;

	if (!new_state)
		pr_err("WARNING: %s", EBPF_WARN);
}
#endif