/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 *	verify_cpu.S - Code for cpu long mode and SSE verification. This
 *	code has been borrowed from boot/setup.S and was introduced by
 *	Andi Kleen.
 *
 *	Copyright (c) 2007  Andi Kleen (ak@suse.de)
 *	Copyright (c) 2007  Eric Biederman (ebiederm@xmission.com)
 *	Copyright (c) 2007  Vivek Goyal (vgoyal@in.ibm.com)
 *	Copyright (c) 2010  Kees Cook (kees.cook@canonical.com)
 *
 *	This is common code for verifying whether the CPU supports long
 *	mode and SSE. It is not called directly; instead, this file is
 *	included at various places and compiled in that context. It is
 *	expected to run in 32-bit code. Currently:
 *
 *	arch/x86/boot/compressed/head_64.S: Boot cpu verification
 *	arch/x86/kernel/trampoline_64.S: secondary processor verification
 *	arch/x86/kernel/head_32.S: processor startup
 *
 *	verify_cpu returns the status of long mode and SSE in register %eax:
 *		0: Success    1: Failure
 *
 *	On Intel, the XD_DISABLE flag will be cleared as a side-effect.
 *
 *	The caller needs to check the return code and act appropriately,
 *	either display a message or halt.
 */

#include <asm/cpufeatures.h>
#include <asm/msr-index.h>

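/*
 * A minimal sketch of a typical 32-bit caller, with an illustrative
 * error label (real call sites pick their own label name and error
 * handling):
 *
 *	call	verify_cpu
 *	testl	%eax, %eax		# 0: long mode + SSE present
 *	jnz	.Lno_longmode		# display a message or halt
 */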
SYM_FUNC_START_LOCAL(verify_cpu)
	pushf				# Save caller passed flags
	push	$0			# Kill any dangerous flags
	popf

#ifndef __x86_64__
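	# CPUID support is detected by trying to toggle the ID flag
	# (bit 21, 0x200000) in EFLAGS: if the change does not stick,
	# the CPU has no CPUID instruction and thus no long mode.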
	pushfl				# standard way to check for cpuid
	popl	%eax
	movl	%eax,%ebx
	xorl	$0x200000,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	cmpl	%eax,%ebx
	jz	.Lverify_cpu_no_longmode # cpu has no cpuid
#endif

	movl	$0x0,%eax		# See if cpuid 1 is implemented
	cpuid
	cmpl	$0x1,%eax
	jb	.Lverify_cpu_no_longmode # no cpuid 1

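	# CPUID leaf 0 returns the vendor string in EBX:EDX:ECX as
	# little-endian ASCII: 0x68747541/0x69746e65/0x444d4163 spells
	# "Auth"/"enti"/"cAMD". %di is set to 1 for AMD CPUs and used
	# later to decide whether the SSE workaround may be attempted.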
	xor	%di,%di
	cmpl	$0x68747541,%ebx	# AuthenticAMD
	jnz	.Lverify_cpu_noamd
	cmpl	$0x69746e65,%edx
	jnz	.Lverify_cpu_noamd
	cmpl	$0x444d4163,%ecx
	jnz	.Lverify_cpu_noamd
	mov	$1,%di			# cpu is from AMD
	jmp	.Lverify_cpu_check

.Lverify_cpu_noamd:
	cmpl	$0x756e6547,%ebx	# GenuineIntel?
	jnz	.Lverify_cpu_check
	cmpl	$0x49656e69,%edx
	jnz	.Lverify_cpu_check
	cmpl	$0x6c65746e,%ecx
	jnz	.Lverify_cpu_check

	# only call IA32_MISC_ENABLE when:
	# family > 6 || (family == 6 && model >= 0xd)
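	# CPUID.1 EAX layout: family in bits 11:8, extended family in
	# bits 27:20, model in bits 7:4, extended model in bits 19:16;
	# the masks below keep just those fields before comparing.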
	movl	$0x1, %eax		# check CPU family and model
	cpuid
	movl	%eax, %ecx

	andl	$0x0ff00f00, %eax	# mask family and extended family
	shrl	$8, %eax
	cmpl	$6, %eax
	ja	.Lverify_cpu_clear_xd	# family > 6, ok
	jb	.Lverify_cpu_check	# family < 6, skip

	andl	$0x000f00f0, %ecx	# mask model and extended model
	shrl	$4, %ecx
	cmpl	$0xd, %ecx
	jb	.Lverify_cpu_check	# family == 6, model < 0xd, skip

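	# XD_DISABLE is bit 34 of IA32_MISC_ENABLE, i.e. bit 2 of the
	# high dword that rdmsr returns in %edx; while it is set the CPU
	# hides the NX (XD) capability from CPUID.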
.Lverify_cpu_clear_xd:
	movl	$MSR_IA32_MISC_ENABLE, %ecx
	rdmsr
	btrl	$2, %edx		# clear MSR_IA32_MISC_ENABLE_XD_DISABLE
	jnc	.Lverify_cpu_check	# only write MSR if bit was changed
	wrmsr

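	# REQUIRED_MASK0/REQUIRED_MASK1 encode the CPUID feature bits the
	# kernel cannot run without; andl followed by xorl with the same
	# mask leaves %edx non-zero iff any required bit is missing.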
.Lverify_cpu_check:
	movl	$0x1,%eax		# Does the cpu have what it takes
	cpuid
	andl	$REQUIRED_MASK0,%edx
	xorl	$REQUIRED_MASK0,%edx
	jnz	.Lverify_cpu_no_longmode

	movl	$0x80000000,%eax	# See if extended cpuid is implemented
	cpuid
	cmpl	$0x80000001,%eax
	jb	.Lverify_cpu_no_longmode # no extended cpuid

	movl	$0x80000001,%eax	# Does the cpu have what it takes
	cpuid
	andl	$REQUIRED_MASK1,%edx
	xorl	$REQUIRED_MASK1,%edx
	jnz	.Lverify_cpu_no_longmode

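	# If SSE is missing on an AMD CPU (%di != 0), clear the SSE
	# disable bit (bit 15) in MSR_K7_HWCR and retest; %di is zeroed
	# first so the loop runs at most twice.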
.Lverify_cpu_sse_test:
	movl	$1,%eax
	cpuid
	andl	$SSE_MASK,%edx
	cmpl	$SSE_MASK,%edx
	je	.Lverify_cpu_sse_ok
	test	%di,%di
	jz	.Lverify_cpu_no_longmode # only try to force SSE on AMD
	movl	$MSR_K7_HWCR,%ecx
	rdmsr
	btr	$15,%eax		# enable SSE
	wrmsr
	xor	%di,%di			# don't loop
	jmp	.Lverify_cpu_sse_test	# try again

.Lverify_cpu_no_longmode:
	popf				# Restore caller passed flags
	movl	$1,%eax
	ret
.Lverify_cpu_sse_ok:
	popf				# Restore caller passed flags
	xorl	%eax, %eax
	ret
SYM_FUNC_END(verify_cpu)