/* SPDX-License-Identifier: GPL-2.0-or-later */
/* ----------------------------------------------------------------------- *
 *
 *   Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
 *
 * ----------------------------------------------------------------------- */

/*
 * raid6/x86.h
 *
 * Definitions common to x86 and x86-64 RAID-6 code only
 */

#ifndef LINUX_RAID_RAID6X86_H
#define LINUX_RAID_RAID6X86_H

#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)

#ifdef __KERNEL__ /* Real code */

#include <asm/fpu/api.h>

#else /* Dummy code for user space testing */

static inline void kernel_fpu_begin(void)
{
}

static inline void kernel_fpu_end(void)
{
}

#define __aligned(x) __attribute__((aligned(x)))

#define X86_FEATURE_MMX		(0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR	(0*32+24) /* FXSAVE and FXRSTOR instructions
					   * (fast save and restore) */
#define X86_FEATURE_XMM		(0*32+25) /* Streaming SIMD Extensions */
#define X86_FEATURE_XMM2	(0*32+26) /* Streaming SIMD Extensions-2 */
#define X86_FEATURE_XMM3	(4*32+ 0) /* "pni" SSE-3 */
#define X86_FEATURE_SSSE3	(4*32+ 9) /* Supplemental SSE-3 */
#define X86_FEATURE_AVX		(4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_AVX2	(9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_AVX512F	(9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_AVX512DQ	(9*32+17) /* AVX-512 DQ (Double/Quad granular)
					   * Instructions
					   */
#define X86_FEATURE_AVX512BW	(9*32+30) /* AVX-512 BW (Byte/Word granular)
					   * Instructions
					   */
#define X86_FEATURE_AVX512VL	(9*32+31) /* AVX-512 VL (128/256 Vector Length)
					   * Extensions
					   */
#define X86_FEATURE_MMXEXT	(1*32+22) /* AMD MMX extensions */
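
/*
 * The flags above are encoded as (word*32 + bit), following the kernel's
 * <asm/cpufeatures.h> numbering: word 0 is CPUID leaf 1 EDX, word 1 is
 * leaf 0x80000001 EDX, word 4 is leaf 1 ECX, and word 9 is leaf 7
 * (subleaf 0) EBX.  The user-space boot_cpu_has() below decodes that
 * encoding to pick the right CPUID leaf and output register.
 */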

/* Should work well enough on modern CPUs for testing */
static inline int boot_cpu_has(int flag)
{
	u32 eax, ebx, ecx, edx;

	/* Select the CPUID leaf that reports this feature word */
	eax = (flag & 0x100) ? 7 :
		(flag & 0x20) ? 0x80000001 : 1;
	ecx = 0;	/* subleaf 0 (needed for leaf 7) */

	asm volatile("cpuid"
		     : "+a" (eax), "=b" (ebx), "=d" (edx), "+c" (ecx));

	/* Pick the output register holding this word, then test the bit */
	return ((flag & 0x100 ? ebx :
		 (flag & 0x80) ? ecx : edx) >> (flag & 31)) & 1;
}
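
/*
 * Illustrative use only (a rough sketch, not part of this header): the
 * RAID-6 SIMD implementations gate their code paths on these helpers,
 * approximately like so:
 *
 *	if (boot_cpu_has(X86_FEATURE_XMM2)) {
 *		kernel_fpu_begin();
 *		... run the SSE-2 syndrome/recovery routines ...
 *		kernel_fpu_end();
 *	}
 */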

#endif /* ndef __KERNEL__ */

#endif /* (__i386__ || __x86_64__) && !__arch_um__ */
#endif /* LINUX_RAID_RAID6X86_H */