/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MICROCODE_H
#define _ASM_X86_MICROCODE_H

#include <asm/cpu.h>
#include <linux/earlycpio.h>
#include <linux/initrd.h>

struct ucode_patch {
	struct list_head plist;
	void *data;		/* Intel uses only this one */
	u32 patch_id;		/* AMD: microcode patch level */
	u16 equiv_cpu;		/* AMD: equivalence-table CPU ID */
};

extern struct list_head microcode_cache;

struct cpu_signature {
	unsigned int sig;	/* CPUID(1).EAX processor signature */
	unsigned int pf;	/* processor flags (Intel platform ID) */
	unsigned int rev;	/* currently loaded microcode revision */
};

struct device;

enum ucode_state {
	UCODE_OK	= 0,	/* success, no update was needed */
	UCODE_NEW,		/* newer microcode was found and cached */
	UCODE_UPDATED,		/* microcode was successfully applied */
	UCODE_NFOUND,		/* no matching microcode was found */
	UCODE_ERROR,		/* loading or applying failed */
};

struct microcode_ops {
	enum ucode_state (*request_microcode_user) (int cpu,
				const void __user *buf, size_t size);

	enum ucode_state (*request_microcode_fw) (int cpu, struct device *,
						  bool refresh_fw);

	void (*microcode_fini_cpu) (int cpu);

	/*
	 * The generic 'microcode_core' part guarantees that the callbacks
	 * below are run on the target CPU when they are invoked.
	 * See also the "Synchronization" section in microcode_core.c.
	 */
	enum ucode_state (*apply_microcode) (int cpu);
	int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
};
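
/*
 * Illustrative sketch (not part of this header): a vendor driver supplies
 * its struct microcode_ops and hands it to microcode_core through its
 * init_<vendor>_microcode() hook, roughly along these lines:
 *
 *	static struct microcode_ops microcode_intel_ops = {
 *		.request_microcode_user	= request_microcode_user,
 *		.request_microcode_fw	= request_microcode_fw,
 *		.collect_cpu_info	= collect_cpu_info,
 *		.apply_microcode	= apply_microcode_intel,
 *	};
 *
 *	struct microcode_ops * __init init_intel_microcode(void)
 *	{
 *		return &microcode_intel_ops;
 *	}
 *
 * microcode_core then dispatches through this pointer and, per the comment
 * above, invokes apply_microcode() and collect_cpu_info() on the target CPU.
 */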

struct ucode_cpu_info {
	struct cpu_signature cpu_sig;
	int valid;		/* set once cpu_sig has been collected */
	void *mc;		/* vendor-specific microcode image for this CPU */
};
extern struct ucode_cpu_info ucode_cpu_info[];
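
/*
 * Scan the initrd / builtin firmware cpio for a microcode blob at @path.
 * @use_pa: access the image through physical addresses, for early loading
 * before paging is enabled (32-bit boot).
 */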
struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa);

#ifdef CONFIG_MICROCODE_INTEL
extern struct microcode_ops * __init init_intel_microcode(void);
#else
static inline struct microcode_ops * __init init_intel_microcode(void)
{
	return NULL;
}
#endif /* CONFIG_MICROCODE_INTEL */

#ifdef CONFIG_MICROCODE_AMD
extern struct microcode_ops * __init init_amd_microcode(void);
extern void __exit exit_amd_microcode(void);
#else
static inline struct microcode_ops * __init init_amd_microcode(void)
{
	return NULL;
}
static inline void __exit exit_amd_microcode(void) {}
#endif

#define MAX_UCODE_COUNT 128

#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')

/* The CPUID vendor string is laid out in EBX, EDX, ECX order, hence the operand order. */
#define CPUID_IS(a, b, c, ebx, ecx, edx) \
		(!((ebx ^ (a))|(edx ^ (b))|(ecx ^ (c))))

/*
 * During the early microcode loading phase on the BSP, boot_cpu_data is not
 * set up yet, so x86_cpuid_vendor() is used to get the vendor ID for the BSP.
 *
 * In the 32-bit AP case, accessing boot_cpu_data would require a linear
 * address. To keep the code simple, x86_cpuid_vendor() is used to get the
 * vendor ID for APs as well.
 *
 * x86_cpuid_vendor() reads the vendor string directly via CPUID.
 */
static inline int x86_cpuid_vendor(void)
{
	u32 eax = 0x00000000;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
		return X86_VENDOR_INTEL;

	if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
		return X86_VENDOR_AMD;

	return X86_VENDOR_UNKNOWN;
}

static inline unsigned int x86_cpuid_family(void)
{
	u32 eax = 0x00000001;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	return x86_family(eax);
}
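
/*
 * Usage sketch (illustrative): during early loading the core keys off these
 * two helpers instead of boot_cpu_data, roughly like the following, where
 * load_ucode_intel_bsp()/load_ucode_amd_bsp() are the vendor entry points
 * in the respective drivers:
 *
 *	void __init load_ucode_bsp(void)
 *	{
 *		unsigned int family = x86_cpuid_family();
 *
 *		switch (x86_cpuid_vendor()) {
 *		case X86_VENDOR_INTEL:
 *			if (family >= 6)
 *				load_ucode_intel_bsp();
 *			break;
 *		case X86_VENDOR_AMD:
 *			if (family >= 0x10)
 *				load_ucode_amd_bsp(family);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */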

#ifdef CONFIG_MICROCODE
int __init microcode_init(void);
extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
void reload_early_microcode(void);
extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
extern bool initrd_gone;
#else
static inline int __init microcode_init(void) { return 0; }
static inline void __init load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) { }
static inline void reload_early_microcode(void) { }
static inline bool
get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; }
#endif

#endif /* _ASM_X86_MICROCODE_H */