// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include "bitops.h"

#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>
#include "cpuflags.h"

struct cpu_features cpu;
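/* CPU vendor string from CPUID leaf 0, stored as {EBX, EDX, ECX}. */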
u32 cpu_vendor[3];

static bool loaded_flags;

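/*
 * Probe for an x87 FPU: clear CR0.EM and CR0.TS if they are set so that
 * FPU instructions can execute, then run FNINIT and check that the status
 * and control words read back with their expected reset values.
 */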
static int has_fpu(void)
{
	u16 fcw = -1, fsw = -1;
	unsigned long cr0;

	asm volatile("mov %%cr0,%0" : "=r" (cr0));
	if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
		cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
		asm volatile("mov %0,%%cr0" : : "r" (cr0));
	}

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));

	return fsw == 0 && (fcw & 0x103f) == 0x003f;
}

/*
 * For building the 16-bit code we want to explicitly specify 32-bit
 * push/pop operations, rather than just saying 'pushf' or 'popf' and
 * letting the compiler choose. But this is also included from the
 * compressed/ directory where it may be 64-bit code, and thus needs
 * to be 'pushfq' or 'popfq' in that case.
 */
#ifdef __x86_64__
#define PUSHF "pushfq"
#define POPF "popfq"
#else
#define PUSHF "pushfl"
#define POPF "popfl"
#endif

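/*
 * Return nonzero if the given EFLAGS bits can be toggled; called with
 * X86_EFLAGS_ID below to check whether the CPUID instruction is usable.
 */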
int has_eflag(unsigned long mask)
{
	unsigned long f0, f1;

	asm volatile(PUSHF " \n\t"
		     PUSHF " \n\t"
		     "pop %0 \n\t"
		     "mov %0,%1 \n\t"
		     "xor %2,%1 \n\t"
		     "push %1 \n\t"
		     POPF " \n\t"
		     PUSHF " \n\t"
		     "pop %1 \n\t"
		     POPF
		     : "=&r" (f0), "=&r" (f1)
		     : "ri" (mask));

	return !!((f0^f1) & mask);
}

/* Handle x86_32 PIC using ebx. */
#if defined(__i386__) && defined(__PIC__)
# define EBX_REG "=r"
#else
# define EBX_REG "=b"
#endif

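/*
 * CPUID with an explicit subleaf in ECX.  On 32-bit PIC builds %ebx holds
 * the GOT pointer, so EBX_REG lets the compiler pick a scratch register
 * and the asm swaps it with %ebx around the CPUID instruction.
 */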
static inline void cpuid_count(u32 id, u32 count,
			       u32 *a, u32 *b, u32 *c, u32 *d)
{
	asm volatile(".ifnc %%ebx,%3 ; movl %%ebx,%3 ; .endif \n\t"
		     "cpuid \n\t"
		     ".ifnc %%ebx,%3 ; xchgl %%ebx,%3 ; .endif \n\t"
		     : "=a" (*a), "=c" (*c), "=d" (*d), EBX_REG (*b)
		     : "a" (id), "c" (count)
	);
}

#define cpuid(id, a, b, c, d) cpuid_count(id, 0, a, b, c, d)

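/*
 * Fill in the global 'cpu' structure: set X86_FEATURE_FPU from the FPU
 * probe and, if CPUID is available, read the vendor string, family/model
 * and the feature words from the basic and extended leaves.  Repeated
 * calls return immediately once loaded_flags is set.
 */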
void get_cpuflags(void)
{
	u32 max_intel_level, max_amd_level;
	u32 tfms;
	u32 ignored;

	if (loaded_flags)
		return;
	loaded_flags = true;

	if (has_fpu())
		set_bit(X86_FEATURE_FPU, cpu.flags);

	if (has_eflag(X86_EFLAGS_ID)) {
		cpuid(0x0, &max_intel_level, &cpu_vendor[0], &cpu_vendor[2],
		      &cpu_vendor[1]);

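		/*
		 * Leaf 1: EAX holds family/model/stepping; the EDX and ECX
		 * feature bits are stored as cpu.flags[0] and cpu.flags[4].
		 */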
		if (max_intel_level >= 0x00000001 &&
		    max_intel_level <= 0x0000ffff) {
			cpuid(0x1, &tfms, &ignored, &cpu.flags[4],
			      &cpu.flags[0]);
			cpu.level = (tfms >> 8) & 15;
			cpu.family = cpu.level;
			cpu.model = (tfms >> 4) & 15;
			if (cpu.level >= 6)
				cpu.model += ((tfms >> 16) & 0xf) << 4;
		}

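		/* Leaf 7, subleaf 0: ECX feature bits go into cpu.flags[16]. */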
		if (max_intel_level >= 0x00000007) {
			cpuid_count(0x00000007, 0, &ignored, &ignored,
				    &cpu.flags[16], &ignored);
		}

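		/*
		 * Extended leaves: 0x80000000 reports the highest extended
		 * level; leaf 0x80000001 EDX/ECX feature bits are stored as
		 * cpu.flags[1] and cpu.flags[6].
		 */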
		cpuid(0x80000000, &max_amd_level, &ignored, &ignored,
		      &ignored);

		if (max_amd_level >= 0x80000001 &&
		    max_amd_level <= 0x8000ffff) {
			cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
			      &cpu.flags[1]);
		}
	}
}