// SPDX-License-Identifier: GPL-2.0-only
/*
 * xsave/xrstor support.
 *
 * Author: Suresh Siddha <suresh.b.siddha@intel.com>
 */
#include <linux/compat.h>
#include <linux/cpu.h>
#include <linux/mman.h>
#include <linux/pkeys.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>

#include <asm/fpu/api.h>
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/xstate.h>

#include <asm/tlbflush.h>
#include <asm/cpufeature.h>

/*
 * Although we spell it out in here, the Processor Trace
 * xfeature is completely unused. We use other mechanisms
 * to save/restore PT state in Linux.
 */
static const char *xfeature_names[] =
{
        "x87 floating point registers",
        "SSE registers",
        "AVX registers",
        "MPX bounds registers",
        "MPX CSR",
        "AVX-512 opmask",
        "AVX-512 Hi256",
        "AVX-512 ZMM_Hi256",
        "Processor Trace (unused)",
        "Protection Keys User registers",
        "PASID state",
        "unknown xstate feature",
};

static short xsave_cpuid_features[] __initdata = {
        X86_FEATURE_FPU,
        X86_FEATURE_XMM,
        X86_FEATURE_AVX,
        X86_FEATURE_MPX,
        X86_FEATURE_MPX,
        X86_FEATURE_AVX512F,
        X86_FEATURE_AVX512F,
        X86_FEATURE_AVX512F,
        X86_FEATURE_INTEL_PT,
        X86_FEATURE_PKU,
        X86_FEATURE_ENQCMD,
};
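
/*
 * Note: xsave_cpuid_features[] is indexed by xfeature number; entry i
 * names the CPUID feature flag that must be present for xfeature bit i
 * to be kept enabled. E.g. XFEATURE_YMM (bit 2) depends on
 * X86_FEATURE_AVX, and both MPX components (bits 3 and 4) depend on
 * the single X86_FEATURE_MPX flag.
 */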

/*
 * This represents the full set of bits that should ever be set in a kernel
 * XSAVE buffer, both supervisor and user xstates.
 */
u64 xfeatures_mask_all __read_mostly;

static unsigned int xstate_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_sizes[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_comp_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_supervisor_only_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};

/*
 * The kernel's XSAVE area can be in standard or compacted format;
 * it is always in standard format for user mode. This is the user
 * mode standard format size used for signal and ptrace frames.
 */
unsigned int fpu_user_xstate_size;

/*
 * Return whether the system supports a given xfeature.
 *
 * Also return the name of the (most advanced) feature that the caller requested:
 */
int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
{
        u64 xfeatures_missing = xfeatures_needed & ~xfeatures_mask_all;

        if (unlikely(feature_name)) {
                long xfeature_idx, max_idx;
                u64 xfeatures_print;
                /*
                 * We use fls64() here to be able to print the most advanced
                 * feature that was requested but is missing. So if a driver
                 * asks about "XFEATURE_MASK_SSE | XFEATURE_MASK_YMM" we'll print the
                 * missing AVX feature - this is the most informative message
                 * to users:
                 */
                if (xfeatures_missing)
                        xfeatures_print = xfeatures_missing;
                else
                        xfeatures_print = xfeatures_needed;

                xfeature_idx = fls64(xfeatures_print)-1;
                max_idx = ARRAY_SIZE(xfeature_names)-1;
                xfeature_idx = min(xfeature_idx, max_idx);

                *feature_name = xfeature_names[xfeature_idx];
        }

        if (xfeatures_missing)
                return 0;

        return 1;
}
EXPORT_SYMBOL_GPL(cpu_has_xfeatures);
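
/*
 * Typical use from a (hypothetical) driver that needs AVX state:
 *
 *        const char *name;
 *
 *        if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, &name))
 *                pr_err("x86/fpu: required xfeature '%s' missing\n", name);
 */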

static bool xfeature_is_supervisor(int xfeature_nr)
{
        /*
         * Extended State Enumeration Sub-leaves (EAX = 0DH, ECX = n, n > 1)
         * return ECX[0] set to (1) for a supervisor state, and cleared (0)
         * for a user state.
         */
        u32 eax, ebx, ecx, edx;

        cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
        return ecx & 1;
}
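
/*
 * For reference, the attribute bits returned in ECX by CPUID(0xD, n >= 2),
 * as used above and in xfeature_is_aligned() below:
 *
 *        ECX[0] - set for a supervisor state, clear for a user state
 *        ECX[1] - set if the component must be 64-byte aligned in the
 *                 compacted format
 */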

/*
 * When executing XSAVEOPT (or other optimized XSAVE instructions), if
 * a processor implementation detects that an FPU state component is still
 * (or is again) in its initialized state, it may clear the corresponding
 * bit in the header.xfeatures field, and can skip the writeout of registers
 * to the corresponding memory layout.
 *
 * This means that when the bit is zero, the state component might still contain
 * some previous - non-initialized register state.
 *
 * Before writing xstate information to user-space we sanitize those components,
 * to always ensure that the memory layout of a feature will be in the init state
 * if the corresponding header bit is zero. This is to ensure that user-space doesn't
 * see some stale state in the memory layout during signal handling, debugging etc.
 */
void fpstate_sanitize_xstate(struct fpu *fpu)
{
        struct fxregs_state *fx = &fpu->state.fxsave;
        int feature_bit;
        u64 xfeatures;

        if (!use_xsaveopt())
                return;

        xfeatures = fpu->state.xsave.header.xfeatures;

        /*
         * All feature bits are set: no component is in its init state,
         * so the memory layout is already up to date and there is
         * nothing else for us to do.
         */
        if ((xfeatures & xfeatures_mask_all) == xfeatures_mask_all)
                return;

        /*
         * FP is in init state
         */
        if (!(xfeatures & XFEATURE_MASK_FP)) {
                fx->cwd = 0x37f;
                fx->swd = 0;
                fx->twd = 0;
                fx->fop = 0;
                fx->rip = 0;
                fx->rdp = 0;
                memset(&fx->st_space[0], 0, 128);
        }

        /*
         * SSE is in init state
         */
        if (!(xfeatures & XFEATURE_MASK_SSE))
                memset(&fx->xmm_space[0], 0, 256);

        /*
         * First two features are FPU and SSE, which above we handled
         * in a special way already:
         */
        feature_bit = 0x2;
        xfeatures = (xfeatures_mask_user() & ~xfeatures) >> 2;

        /*
         * Update all the remaining memory layouts according to their
         * standard xstate layout, if their header bit is in the init
         * state:
         */
        while (xfeatures) {
                if (xfeatures & 0x1) {
                        int offset = xstate_comp_offsets[feature_bit];
                        int size = xstate_sizes[feature_bit];

                        memcpy((void *)fx + offset,
                               (void *)&init_fpstate.xsave + offset,
                               size);
                }

                xfeatures >>= 1;
                feature_bit++;
        }
}
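
/*
 * Worked example of the sanitization above: if XSAVEOPT cleared the YMM
 * bit in header.xfeatures because the task's AVX state was back in its
 * init state, the loop copies init_fpstate's YMM area over the possibly
 * stale YMM bytes, so a ptrace or signal-frame reader sees init state
 * instead of leftover register contents.
 */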

/*
 * Enable the extended processor state save/restore feature.
 * Called once per CPU onlining.
 */
void fpu__init_cpu_xstate(void)
{
        u64 unsup_bits;

        if (!boot_cpu_has(X86_FEATURE_XSAVE) || !xfeatures_mask_all)
                return;
        /*
         * Unsupported supervisor xstates should not be found in
         * the xfeatures mask.
         */
        unsup_bits = xfeatures_mask_all & XFEATURE_MASK_SUPERVISOR_UNSUPPORTED;
        WARN_ONCE(unsup_bits, "x86/fpu: Found unsupported supervisor xstates: 0x%llx\n",
                  unsup_bits);

        xfeatures_mask_all &= ~XFEATURE_MASK_SUPERVISOR_UNSUPPORTED;

        cr4_set_bits(X86_CR4_OSXSAVE);

        /*
         * XCR_XFEATURE_ENABLED_MASK (aka. XCR0) sets user features
         * managed by XSAVE{C, OPT, S} and XRSTOR{S}. Only XSAVE user
         * states can be set here.
         */
        xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask_user());

        /*
         * MSR_IA32_XSS sets supervisor states managed by XSAVES.
         */
        if (boot_cpu_has(X86_FEATURE_XSAVES)) {
                wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
                                     xfeatures_mask_dynamic());
        }
}
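
/*
 * Illustrative split (the exact feature set depends on the CPU): with
 * xfeatures_mask_all = FP | SSE | YMM | PKRU | PASID, the code above
 * ends up with:
 *
 *        XCR0     = FP | SSE | YMM | PKRU        (user states)
 *        IA32_XSS = PASID                        (supervisor states)
 */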

static bool xfeature_enabled(enum xfeature xfeature)
{
        return xfeatures_mask_all & BIT_ULL(xfeature);
}

/*
 * Record the offsets and sizes of various xstates contained
 * in the XSAVE state memory layout.
 */
static void __init setup_xstate_features(void)
{
        u32 eax, ebx, ecx, edx, i;
        /* start at the beginning of the "extended state" */
        unsigned int last_good_offset = offsetof(struct xregs_state,
                                                 extended_state_area);
        /*
         * The FP xstates and SSE xstates are legacy states. They are always
         * in the fixed offsets in the xsave area in either compacted form
         * or standard form.
         */
        xstate_offsets[XFEATURE_FP] = 0;
        xstate_sizes[XFEATURE_FP] = offsetof(struct fxregs_state,
                                             xmm_space);

        xstate_offsets[XFEATURE_SSE] = xstate_sizes[XFEATURE_FP];
        xstate_sizes[XFEATURE_SSE] = sizeof_field(struct fxregs_state,
                                                  xmm_space);

        for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
                if (!xfeature_enabled(i))
                        continue;

                cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);

                xstate_sizes[i] = eax;

                /*
                 * If an xfeature is supervisor state, the offset in EBX is
                 * invalid; leave it as -1.
                 */
                if (xfeature_is_supervisor(i))
                        continue;

                xstate_offsets[i] = ebx;

                /*
                 * In our xstate size checks, we assume that the highest-numbered
                 * xstate feature has the highest offset in the buffer. Ensure
                 * it does.
                 */
                WARN_ONCE(last_good_offset > xstate_offsets[i],
                          "x86/fpu: misordered xstate at %d\n", last_good_offset);

                last_good_offset = xstate_offsets[i];
        }
}
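
/*
 * Resulting standard-format layout (extended-area offsets come straight
 * from CPUID and are CPU-specific, so the last entry is only a sketch):
 *
 *        [   0.. 159]  legacy x87 state     (xstate_offsets[XFEATURE_FP])
 *        [ 160.. 415]  XMM space            (xstate_offsets[XFEATURE_SSE])
 *        [ 416.. 511]  rest of the legacy area
 *        [ 512.. 575]  xstate header
 *        [ 576..    ]  one area per enabled extended component
 */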

static void __init print_xstate_feature(u64 xstate_mask)
{
        const char *feature_name;

        if (cpu_has_xfeatures(xstate_mask, &feature_name))
                pr_info("x86/fpu: Supporting XSAVE feature 0x%03Lx: '%s'\n", xstate_mask, feature_name);
}

/*
 * Print out all the supported xstate features:
 */
static void __init print_xstate_features(void)
{
        print_xstate_feature(XFEATURE_MASK_FP);
        print_xstate_feature(XFEATURE_MASK_SSE);
        print_xstate_feature(XFEATURE_MASK_YMM);
        print_xstate_feature(XFEATURE_MASK_BNDREGS);
        print_xstate_feature(XFEATURE_MASK_BNDCSR);
        print_xstate_feature(XFEATURE_MASK_OPMASK);
        print_xstate_feature(XFEATURE_MASK_ZMM_Hi256);
        print_xstate_feature(XFEATURE_MASK_Hi16_ZMM);
        print_xstate_feature(XFEATURE_MASK_PKRU);
        print_xstate_feature(XFEATURE_MASK_PASID);
}

/*
 * This check is important because it is easy to get XSTATE_*
 * confused with XSTATE_BIT_*.
 */
#define CHECK_XFEATURE(nr) do {                 \
        WARN_ON(nr < FIRST_EXTENDED_XFEATURE);  \
        WARN_ON(nr >= XFEATURE_MAX);            \
} while (0)
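
/*
 * Example of the confusion this catches: passing XFEATURE_MASK_PKRU
 * (BIT(9) == 512) where the feature *number* XFEATURE_PKRU (== 9) is
 * expected trips the "nr >= XFEATURE_MAX" warning above. Small masks
 * that happen to look like valid numbers cannot be caught this way.
 */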

/*
 * We could cache this like xstate_sizes[], but we only use
 * it here, so it would be a waste of space.
 */
static int xfeature_is_aligned(int xfeature_nr)
{
        u32 eax, ebx, ecx, edx;

        CHECK_XFEATURE(xfeature_nr);

        if (!xfeature_enabled(xfeature_nr)) {
                WARN_ONCE(1, "Checking alignment of disabled xfeature %d\n",
                          xfeature_nr);
                return 0;
        }

        cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
        /*
         * The value returned in ECX[1] indicates the alignment
         * of state component 'xfeature_nr' when the compacted format
         * of the extended region of an XSAVE area is used:
         */
        return !!(ecx & 2);
}

/*
 * This function sets up offsets and sizes of all extended states in
 * xsave area. This supports both standard format and compacted format
 * of the xsave area.
 */
static void __init setup_xstate_comp_offsets(void)
{
        unsigned int next_offset;
        int i;

        /*
         * The FP xstates and SSE xstates are legacy states. They are always
         * in the fixed offsets in the xsave area in either compacted form
         * or standard form.
         */
        xstate_comp_offsets[XFEATURE_FP] = 0;
        xstate_comp_offsets[XFEATURE_SSE] = offsetof(struct fxregs_state,
                                                     xmm_space);

        if (!boot_cpu_has(X86_FEATURE_XSAVES)) {
                for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
                        if (xfeature_enabled(i))
                                xstate_comp_offsets[i] = xstate_offsets[i];
                }
                return;
        }

        next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;

        for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
                if (!xfeature_enabled(i))
                        continue;

                if (xfeature_is_aligned(i))
                        next_offset = ALIGN(next_offset, 64);

                xstate_comp_offsets[i] = next_offset;
                next_offset += xstate_sizes[i];
        }
}
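
/*
 * Worked example with hypothetical attributes (real sizes and alignment
 * come from CPUID): if YMM (256 bytes, unaligned) and one 64-byte-aligned
 * component were the only extended features enabled:
 *
 *        next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE = 512 + 64 = 576
 *        xstate_comp_offsets[YMM]   = 576, next_offset = 832
 *        xstate_comp_offsets[other] = 832  (832 is already 64-byte aligned)
 */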

/*
 * Setup offsets of a supervisor-state-only XSAVES buffer:
 *
 * The offsets stored in xstate_comp_offsets[] only work for one specific
 * value of the Requested Feature BitMap (RFBM). In cases where a different
 * RFBM value is used, a different set of offsets is required. This set of
 * offsets is for when RFBM=xfeatures_mask_supervisor().
 */
static void __init setup_supervisor_only_offsets(void)
{
        unsigned int next_offset;
        int i;

        next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;

        for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
                if (!xfeature_enabled(i) || !xfeature_is_supervisor(i))
                        continue;

                if (xfeature_is_aligned(i))
                        next_offset = ALIGN(next_offset, 64);

                xstate_supervisor_only_offsets[i] = next_offset;
                next_offset += xstate_sizes[i];
        }
}

/*
 * Print out xstate component offsets and sizes
 */
static void __init print_xstate_offset_size(void)
{
        int i;

        for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
                if (!xfeature_enabled(i))
                        continue;
                pr_info("x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n",
                        i, xstate_comp_offsets[i], i, xstate_sizes[i]);
        }
}

/*
 * All supported features have either init state all zeros or are
 * handled in setup_init_fpu_buf() individually. This is an explicit
 * feature list and does not use XFEATURE_MASK*SUPPORTED to catch
 * newly added supported features at build time and make people
 * actually look at the init state for the new feature.
 */
#define XFEATURES_INIT_FPSTATE_HANDLED          \
        (XFEATURE_MASK_FP |                     \
         XFEATURE_MASK_SSE |                    \
         XFEATURE_MASK_YMM |                    \
         XFEATURE_MASK_OPMASK |                 \
         XFEATURE_MASK_ZMM_Hi256 |              \
         XFEATURE_MASK_Hi16_ZMM |               \
         XFEATURE_MASK_PKRU |                   \
         XFEATURE_MASK_BNDREGS |                \
         XFEATURE_MASK_BNDCSR |                 \
         XFEATURE_MASK_PASID)

/*
 * setup the xstate image representing the init state
 */
static void __init setup_init_fpu_buf(void)
{
        static int on_boot_cpu __initdata = 1;

        BUILD_BUG_ON((XFEATURE_MASK_USER_SUPPORTED |
                      XFEATURE_MASK_SUPERVISOR_SUPPORTED) !=
                     XFEATURES_INIT_FPSTATE_HANDLED);

        WARN_ON_FPU(!on_boot_cpu);
        on_boot_cpu = 0;

        if (!boot_cpu_has(X86_FEATURE_XSAVE))
                return;

        setup_xstate_features();
        print_xstate_features();

        if (boot_cpu_has(X86_FEATURE_XSAVES))
                init_fpstate.xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
                                                     xfeatures_mask_all;

        /*
         * Init all the features state with header.xfeatures being 0x0
         */
        copy_kernel_to_xregs_booting(&init_fpstate.xsave);

        /*
         * All components are now in init state. Read the state back so
         * that init_fpstate contains all non-zero init state. This only
         * works with XSAVE, but not with XSAVEOPT and XSAVES because
         * those use the init optimization which skips writing data for
         * components in init state.
         *
         * XSAVE could be used, but that would require to reshuffle the
         * data when XSAVES is available because XSAVES uses xstate
         * compaction. But doing so is a pointless exercise because most
         * components have an all zeros init state except for the legacy
         * ones (FP and SSE). Those can be saved with FXSAVE into the
         * legacy area. Adding new features requires to ensure that init
         * state is all zeroes or if not to add the necessary handling
         * here.
         */
        fxsave(&init_fpstate.fxsave);
}
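
/*
 * The legacy init state captured by the FXSAVE above is not all zeroes:
 * among other things the x87 control word inits to 0x37f and MXCSR to
 * 0x1f80, which is why simply zeroing init_fpstate would be wrong.
 */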

static int xfeature_uncompacted_offset(int xfeature_nr)
{
        u32 eax, ebx, ecx, edx;

        /*
         * Only XSAVES supports supervisor states and it uses compacted
         * format. Checking a supervisor state's uncompacted offset is
         * an error.
         */
        if (XFEATURE_MASK_SUPERVISOR_ALL & BIT_ULL(xfeature_nr)) {
                WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr);
                return -1;
        }

        CHECK_XFEATURE(xfeature_nr);
        cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
        return ebx;
}

int xfeature_size(int xfeature_nr)
{
        u32 eax, ebx, ecx, edx;

        CHECK_XFEATURE(xfeature_nr);
        cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
        return eax;
}

/*
 * 'XSAVES' implies two different things:
 * 1. saving of supervisor/system state
 * 2. using the compacted format
 *
 * Use this function when dealing with the compacted format so
 * that it is obvious which aspect of 'XSAVES' is being handled
 * by the calling code.
 */
int using_compacted_format(void)
{
        return boot_cpu_has(X86_FEATURE_XSAVES);
}

/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
int validate_user_xstate_header(const struct xstate_header *hdr)
{
        /* No unknown or supervisor features may be set */
        if (hdr->xfeatures & ~xfeatures_mask_user())
                return -EINVAL;

        /* Userspace must use the uncompacted format */
        if (hdr->xcomp_bv)
                return -EINVAL;

        /*
         * If 'reserved' is shrunken to add a new field, make sure to validate
         * that new field here!
         */
        BUILD_BUG_ON(sizeof(hdr->reserved) != 48);

        /* No reserved bits may be set */
        if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
                return -EINVAL;

        return 0;
}
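
/*
 * Example of a header this rejects (hypothetical user-supplied buffer):
 *
 *        struct xstate_header hdr = { };
 *
 *        hdr.xfeatures = XFEATURE_MASK_FP | XFEATURE_MASK_PASID;
 *        validate_user_xstate_header(&hdr);
 *
 * returns -EINVAL because PASID is a supervisor state and must never be
 * set by user space, even though xcomp_bv and reserved[] are fine here.
 */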

static void __xstate_dump_leaves(void)
{
        int i;
        u32 eax, ebx, ecx, edx;
        static int should_dump = 1;

        if (!should_dump)
                return;
        should_dump = 0;
        /*
         * Dump out a few leaves past the ones that we support
         * just in case there are some goodies up there
         */
        for (i = 0; i < XFEATURE_MAX + 10; i++) {
                cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
                pr_warn("CPUID[%02x, %02x]: eax=%08x ebx=%08x ecx=%08x edx=%08x\n",
                        XSTATE_CPUID, i, eax, ebx, ecx, edx);
        }
}

#define XSTATE_WARN_ON(x) do {                                                  \
        if (WARN_ONCE(x, "XSAVE consistency problem, dumping leaves")) {        \
                __xstate_dump_leaves();                                         \
        }                                                                       \
} while (0)

#define XCHECK_SZ(sz, nr, nr_macro, __struct) do {                      \
        if ((nr == nr_macro) &&                                         \
            WARN_ONCE(sz != sizeof(__struct),                           \
                "%s: struct is %zu bytes, cpu state %d bytes\n",        \
                __stringify(nr_macro), sizeof(__struct), sz)) {         \
                __xstate_dump_leaves();                                 \
        }                                                               \
} while (0)

/*
 * We have a C struct for each 'xstate'. We need to ensure
 * that our software representation matches what the CPU
 * tells us about the state's size.
 */
static void check_xstate_against_struct(int nr)
{
        /*
         * Ask the CPU for the size of the state.
         */
        int sz = xfeature_size(nr);
        /*
         * Match each CPU state with the corresponding software
         * structure.
         */
        XCHECK_SZ(sz, nr, XFEATURE_YMM,       struct ymmh_struct);
        XCHECK_SZ(sz, nr, XFEATURE_BNDREGS,   struct mpx_bndreg_state);
        XCHECK_SZ(sz, nr, XFEATURE_BNDCSR,    struct mpx_bndcsr_state);
        XCHECK_SZ(sz, nr, XFEATURE_OPMASK,    struct avx_512_opmask_state);
        XCHECK_SZ(sz, nr, XFEATURE_ZMM_Hi256, struct avx_512_zmm_uppers_state);
        XCHECK_SZ(sz, nr, XFEATURE_Hi16_ZMM,  struct avx_512_hi16_state);
        XCHECK_SZ(sz, nr, XFEATURE_PKRU,      struct pkru_state);
        XCHECK_SZ(sz, nr, XFEATURE_PASID,     struct ia32_pasid_state);

        /*
         * Make *SURE* to add any feature numbers in below if
         * there are "holes" in the xsave state component
         * numbers.
         */
        if ((nr < XFEATURE_YMM) ||
            (nr >= XFEATURE_MAX) ||
            (nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR) ||
            ((nr >= XFEATURE_RSRVD_COMP_11) && (nr <= XFEATURE_LBR))) {
                WARN_ONCE(1, "no structure for xstate: %d\n", nr);
                XSTATE_WARN_ON(1);
        }
}

/*
 * This essentially double-checks what the CPU told us about
 * how large the XSAVE buffer needs to be. We are recalculating
 * it to be safe.
 *
 * Dynamic XSAVE features allocate their own buffers and are not
 * covered by these checks. Only the size of the buffer for task->fpu
 * is checked here.
 */
static void do_extra_xstate_size_checks(void)
{
        int paranoid_xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
        int i;

        for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
                if (!xfeature_enabled(i))
                        continue;

                check_xstate_against_struct(i);
                /*
                 * Supervisor state components can be managed only by
                 * XSAVES, which is compacted-format only.
                 */
                if (!using_compacted_format())
                        XSTATE_WARN_ON(xfeature_is_supervisor(i));

                /* Align from the end of the previous feature */
                if (xfeature_is_aligned(i))
                        paranoid_xstate_size = ALIGN(paranoid_xstate_size, 64);
                /*
                 * The offset of a given state in the non-compacted
                 * format is given to us in a CPUID leaf. We check
                 * them for being ordered (increasing offsets) in
                 * setup_xstate_features().
                 */
                if (!using_compacted_format())
                        paranoid_xstate_size = xfeature_uncompacted_offset(i);
                /*
                 * The compacted-format offset always depends on where
                 * the previous state ended.
                 */
                paranoid_xstate_size += xfeature_size(i);
        }
        XSTATE_WARN_ON(paranoid_xstate_size != fpu_kernel_xstate_size);
}


/*
 * Get total size of enabled xstates in XCR0 | IA32_XSS.
 *
 * Note the SDM's wording here. "sub-function 0" only enumerates
 * the size of the *user* states. If we use it to size a buffer
 * that we use 'XSAVES' on, we could potentially overflow the
 * buffer because 'XSAVES' saves system states too.
 */
static unsigned int __init get_xsaves_size(void)
{
        unsigned int eax, ebx, ecx, edx;
        /*
         * - CPUID function 0DH, sub-function 1:
         *    EBX enumerates the size (in bytes) required by
         *    the XSAVES instruction for an XSAVE area
         *    containing all the state components
         *    corresponding to bits currently set in
         *    XCR0 | IA32_XSS.
         */
        cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
        return ebx;
}

/*
 * Get the total size of the enabled xstates without the dynamic supervisor
 * features.
 */
static unsigned int __init get_xsaves_size_no_dynamic(void)
{
        u64 mask = xfeatures_mask_dynamic();
        unsigned int size;

        if (!mask)
                return get_xsaves_size();

        /* Disable dynamic features. */
        wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());

        /*
         * Ask the hardware what size is required of the buffer.
         * This is the size required for the task->fpu buffer.
         */
        size = get_xsaves_size();

        /* Re-enable dynamic features so XSAVES will work on them again. */
        wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask);

        return size;
}

static unsigned int __init get_xsave_size(void)
{
        unsigned int eax, ebx, ecx, edx;
        /*
         * - CPUID function 0DH, sub-function 0:
         *    EBX enumerates the size (in bytes) required by
         *    the XSAVE instruction for an XSAVE area
         *    containing all the *user* state components
         *    corresponding to bits currently set in XCR0.
         */
        cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
        return ebx;
}
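
/*
 * Summary of the two size enumerations above:
 *
 *        CPUID(0xD, 0).EBX - XSAVE size of the user states in XCR0
 *                            (standard format; signal frames, ptrace)
 *        CPUID(0xD, 1).EBX - XSAVES size of XCR0 | IA32_XSS
 *                            (compacted format; the task->fpu buffer)
 */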

/*
 * Will the runtime-enumerated 'xstate_size' fit in the init
 * task's statically-allocated buffer?
 */
static bool is_supported_xstate_size(unsigned int test_xstate_size)
{
        if (test_xstate_size <= sizeof(union fpregs_state))
                return true;

        pr_warn("x86/fpu: xstate buffer too small (%zu < %d), disabling xsave\n",
                sizeof(union fpregs_state), test_xstate_size);
        return false;
}

static int __init init_xstate_size(void)
{
        /* Recompute the context size for enabled features: */
        unsigned int possible_xstate_size;
        unsigned int xsave_size;

        xsave_size = get_xsave_size();

        if (boot_cpu_has(X86_FEATURE_XSAVES))
                possible_xstate_size = get_xsaves_size_no_dynamic();
        else
                possible_xstate_size = xsave_size;

        /* Ensure we have the space to store all enabled: */
        if (!is_supported_xstate_size(possible_xstate_size))
                return -EINVAL;

        /*
         * The size is OK, we are definitely going to use xsave,
         * make it known to the world that we need more space.
         */
        fpu_kernel_xstate_size = possible_xstate_size;
        do_extra_xstate_size_checks();

        /*
         * User space is always in standard format.
         */
        fpu_user_xstate_size = xsave_size;
        return 0;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * We enabled the XSAVE hardware, but something went wrong and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * we can not use it. Disable it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) static void fpu__init_disable_system_xstate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) xfeatures_mask_all = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) cr4_clear_bits(X86_CR4_OSXSAVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) setup_clear_cpu_cap(X86_FEATURE_XSAVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * Enable and initialize the xsave feature.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * Called once per system bootup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) void __init fpu__init_system_xstate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) unsigned int eax, ebx, ecx, edx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) static int on_boot_cpu __initdata = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) WARN_ON_FPU(!on_boot_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) on_boot_cpu = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (!boot_cpu_has(X86_FEATURE_FPU)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) pr_info("x86/fpu: No FPU detected\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) pr_info("x86/fpu: x87 FPU will use %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) boot_cpu_has(X86_FEATURE_FXSR) ? "FXSAVE" : "FSAVE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) WARN_ON_FPU(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * Find user xstates supported by the processor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) xfeatures_mask_all = eax + ((u64)edx << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * Find supervisor xstates supported by the processor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) xfeatures_mask_all |= ecx + ((u64)edx << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if ((xfeatures_mask_user() & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * This indicates that something really unexpected happened
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * with the enumeration. Disable XSAVE and try to continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * booting without it. This is too early to BUG().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) xfeatures_mask_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) goto out_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * Clear XSAVE features that are disabled in the normal CPUID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) for (i = 0; i < ARRAY_SIZE(xsave_cpuid_features); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (!boot_cpu_has(xsave_cpuid_features[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) xfeatures_mask_all &= ~BIT_ULL(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) xfeatures_mask_all &= fpu__get_supported_xfeatures_mask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /* Enable xstate instructions to be able to continue with initialization: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) fpu__init_cpu_xstate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) err = init_xstate_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) goto out_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * Update info used for ptrace frames; use standard-format size and no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * supervisor xstates:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) update_regset_xstate_info(fpu_user_xstate_size, xfeatures_mask_user());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) fpu__init_prepare_fx_sw_frame();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) setup_init_fpu_buf();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) setup_xstate_comp_offsets();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) setup_supervisor_only_offsets();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) print_xstate_offset_size();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) xfeatures_mask_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) fpu_kernel_xstate_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) boot_cpu_has(X86_FEATURE_XSAVES) ? "compacted" : "standard");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) out_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) /* something went wrong, try to boot without any XSAVE support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) fpu__init_disable_system_xstate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * Restore minimal FPU state after suspend:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) void fpu__resume_cpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * Restore XCR0 on xsave capable CPUs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (boot_cpu_has(X86_FEATURE_XSAVE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask_user());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * Restore IA32_XSS. The same CPUID bit enumerates support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * of XSAVES and MSR_IA32_XSS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (boot_cpu_has(X86_FEATURE_XSAVES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) xfeatures_mask_dynamic());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * Given an xstate feature nr, calculate where in the xsave
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * buffer the state is. Callers should ensure that the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * is valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) static void *__raw_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (!xfeature_enabled(xfeature_nr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) WARN_ON_FPU(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return (void *)xsave + xstate_comp_offsets[xfeature_nr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * Given the xsave area and a state inside, this function returns the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * address of the state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * This is the API that is called to get xstate address in either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * standard format or compacted format of xsave area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * Note that if there is no data for the field in the xsave buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * this will return NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * Inputs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * xstate: the thread's storage area for all FPU data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * XFEATURE_SSE, etc...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * Output:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * address of the state in the xsave area, or NULL if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * field is not present in the xsave buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * Do we even *have* xsave state?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (!boot_cpu_has(X86_FEATURE_XSAVE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * We should not ever be requesting features that we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * have not enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) WARN_ONCE(!(xfeatures_mask_all & BIT_ULL(xfeature_nr)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) "get of unsupported state");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * This assumes the last 'xsave*' instruction to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * have requested that 'xfeature_nr' be saved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * If it did not, we might be seeing and old value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * of the field in the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * This can happen because the last 'xsave' did not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * request that this feature be saved (unlikely)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * or because the "init optimization" caused it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * to not be saved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (!(xsave->header.xfeatures & BIT_ULL(xfeature_nr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return __raw_xsave_addr(xsave, xfeature_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) EXPORT_SYMBOL_GPL(get_xsave_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * This wraps up the common operations that need to occur when retrieving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * data from xsave state. It first ensures that the current task was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * using the FPU and retrieves the data in to a buffer. It then calculates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * the offset of the requested field in the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * This function is safe to call whether the FPU is in use or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * Note that this only works on the current task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * Inputs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * @xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * XFEATURE_SSE, etc...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * Output:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * address of the state in the xsave area or NULL if the state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * is not present or is in its 'init state'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) const void *get_xsave_field_ptr(int xfeature_nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct fpu *fpu = ¤t->thread.fpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * fpu__save() takes the CPU's xstate registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * and saves them off to the 'fpu memory buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) fpu__save(fpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) return get_xsave_addr(&fpu->state.xsave, xfeature_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) #ifdef CONFIG_ARCH_HAS_PKEYS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * This will go out and modify PKRU register to set the access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * rights for @pkey to @init_val.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) unsigned long init_val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) u32 old_pkru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) int pkey_shift = (pkey * PKRU_BITS_PER_PKEY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) u32 new_pkru_bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * This check implies XSAVE support. OSPKE only gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * set if we enable XSAVE and we enable PKU in XCR0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (!boot_cpu_has(X86_FEATURE_OSPKE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * This code should only be called with valid 'pkey'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * values originating from in-kernel users. Complain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * if a bad value is observed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) WARN_ON_ONCE(pkey >= arch_max_pkey());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) /* Set the bits we need in PKRU: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (init_val & PKEY_DISABLE_ACCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) new_pkru_bits |= PKRU_AD_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (init_val & PKEY_DISABLE_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) new_pkru_bits |= PKRU_WD_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) /* Shift the bits in to the correct place in PKRU for pkey: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) new_pkru_bits <<= pkey_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /* Get old PKRU and mask off any old bits in place: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) old_pkru = read_pkru();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /* Write old part along with new part: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) write_pkru(old_pkru | new_pkru_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) #endif /* ! CONFIG_ARCH_HAS_PKEYS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * Weird legacy quirk: SSE and YMM states store information in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * MXCSR and MXCSR_FLAGS fields of the FP area. That means if the FP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * area is marked as unused in the xfeatures header, we need to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * MXCSR and MXCSR_FLAGS if either SSE or YMM are in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (!(xfeatures & (XFEATURE_MASK_SSE|XFEATURE_MASK_YMM)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (xfeatures & XFEATURE_MASK_FP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) void *init_xstate, unsigned int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) membuf_write(to, from_xstate ? xstate : init_xstate, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * Convert from kernel XSAVES compacted format to standard format and copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * to a kernel-space ptrace buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * It supports partial copy but pos always starts from zero. This is called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * from xstateregs_get() and there we check the CPU has XSAVES.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) void copy_xstate_to_kernel(struct membuf to, struct xregs_state *xsave)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) struct xregs_state *xinit = &init_fpstate.xsave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) struct xstate_header header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) unsigned int zerofrom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * The destination is a ptrace buffer; we put in only user xstates:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) memset(&header, 0, sizeof(header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) header.xfeatures = xsave->header.xfeatures;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) header.xfeatures &= xfeatures_mask_user();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) /* Copy FP state up to MXCSR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) copy_feature(header.xfeatures & XFEATURE_MASK_FP, &to, &xsave->i387,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) &xinit->i387, off_mxcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /* Copy MXCSR when SSE or YMM are set in the feature mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) copy_feature(header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) &to, &xsave->i387.mxcsr, &xinit->i387.mxcsr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) MXCSR_AND_FLAGS_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /* Copy the remaining FP state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) copy_feature(header.xfeatures & XFEATURE_MASK_FP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) &to, &xsave->i387.st_space, &xinit->i387.st_space,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) sizeof(xsave->i387.st_space));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /* Copy the SSE state - shared with YMM, but independently managed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) copy_feature(header.xfeatures & XFEATURE_MASK_SSE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) &to, &xsave->i387.xmm_space, &xinit->i387.xmm_space,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) sizeof(xsave->i387.xmm_space));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /* Zero the padding area */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) membuf_zero(&to, sizeof(xsave->i387.padding));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) /* Copy xsave->i387.sw_reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) membuf_write(&to, xstate_fx_sw_bytes, sizeof(xsave->i387.sw_reserved));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) /* Copy the user space relevant state of @xsave->header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) membuf_write(&to, &header, sizeof(header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) zerofrom = offsetof(struct xregs_state, extended_state_area);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * The ptrace buffer is in non-compacted XSAVE format.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * In non-compacted format disabled features still occupy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * state space, but there is no state to copy from in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * compacted init_fpstate. The gap tracking will zero this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (!(xfeatures_mask_user() & BIT_ULL(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * If there was a feature or alignment gap, zero the space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * in the destination buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (zerofrom < xstate_offsets[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) membuf_zero(&to, xstate_offsets[i] - zerofrom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) copy_feature(header.xfeatures & BIT_ULL(i), &to,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) __raw_xsave_addr(xsave, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) __raw_xsave_addr(xinit, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) xstate_sizes[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * Keep track of the last copied state in the non-compacted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * target buffer for gap zeroing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) zerofrom = xstate_offsets[i] + xstate_sizes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (to.left)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) membuf_zero(&to, to.left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * Convert from a ptrace standard-format kernel buffer to kernel XSAVES format
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * and copy to the target thread. This is called from xstateregs_set().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) unsigned int offset, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) struct xstate_header hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) offset = offsetof(struct xregs_state, header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) size = sizeof(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) memcpy(&hdr, kbuf + offset, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (validate_user_xstate_header(&hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) for (i = 0; i < XFEATURE_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) u64 mask = ((u64)1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (hdr.xfeatures & mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) void *dst = __raw_xsave_addr(xsave, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) offset = xstate_offsets[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) size = xstate_sizes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) memcpy(dst, kbuf + offset, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) offset = offsetof(struct fxregs_state, mxcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) size = MXCSR_AND_FLAGS_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) memcpy(&xsave->i387.mxcsr, kbuf + offset, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) * The state that came in from userspace was user-state only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * Mask all the user states out of 'xfeatures':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * Add back in the features that came in from userspace:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) xsave->header.xfeatures |= hdr.xfeatures;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * Convert from a ptrace or sigreturn standard-format user-space buffer to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * kernel XSAVES format and copy to the target thread. This is called from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * xstateregs_set(), as well as potentially from the sigreturn() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * rt_sigreturn() system calls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) unsigned int offset, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) struct xstate_header hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) offset = offsetof(struct xregs_state, header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) size = sizeof(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (__copy_from_user(&hdr, ubuf + offset, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (validate_user_xstate_header(&hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) for (i = 0; i < XFEATURE_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) u64 mask = ((u64)1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (hdr.xfeatures & mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) void *dst = __raw_xsave_addr(xsave, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) offset = xstate_offsets[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) size = xstate_sizes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (__copy_from_user(dst, ubuf + offset, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) offset = offsetof(struct fxregs_state, mxcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) size = MXCSR_AND_FLAGS_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (__copy_from_user(&xsave->i387.mxcsr, ubuf + offset, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) * The state that came in from userspace was user-state only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * Mask all the user states out of 'xfeatures':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * Add back in the features that came in from userspace:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) xsave->header.xfeatures |= hdr.xfeatures;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * Save only supervisor states to the kernel buffer. This blows away all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * old states, and is intended to be used only in __fpu__restore_sig(), where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * user states are restored from the user buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) void copy_supervisor_to_kernel(struct xregs_state *xstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) struct xstate_header *header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) u64 max_bit, min_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) u32 lmask, hmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (WARN_ON(!boot_cpu_has(X86_FEATURE_XSAVES)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (!xfeatures_mask_supervisor())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) max_bit = __fls(xfeatures_mask_supervisor());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) min_bit = __ffs(xfeatures_mask_supervisor());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) lmask = xfeatures_mask_supervisor();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) hmask = xfeatures_mask_supervisor() >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) /* We should never fault when copying to a kernel buffer: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (WARN_ON_FPU(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * At this point, the buffer has only supervisor states and must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * converted back to normal kernel format.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) header = &xstate->header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) header->xcomp_bv |= xfeatures_mask_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * This only moves states up in the buffer. Start with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) * the last state and move backwards so that states are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * not overwritten until after they are moved. Note:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * memmove() allows overlapping src/dst buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) for (i = max_bit; i >= min_bit; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) u8 *xbuf = (u8 *)xstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) if (!((header->xfeatures >> i) & 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /* Move xfeature 'i' into its normal location */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) memmove(xbuf + xstate_comp_offsets[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) xbuf + xstate_supervisor_only_offsets[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) xstate_sizes[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * copy_dynamic_supervisor_to_kernel() - Save dynamic supervisor states to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * an xsave area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * @xstate: A pointer to an xsave area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * @mask: Represent the dynamic supervisor features saved into the xsave area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) * Only the dynamic supervisor states sets in the mask are saved into the xsave
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) * area (See the comment in XFEATURE_MASK_DYNAMIC for the details of dynamic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) * supervisor feature). Besides the dynamic supervisor states, the legacy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * region and XSAVE header are also saved into the xsave area. The supervisor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * features in the XFEATURE_MASK_SUPERVISOR_SUPPORTED and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * XFEATURE_MASK_SUPERVISOR_UNSUPPORTED are not saved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) * The xsave area must be 64-bytes aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) void copy_dynamic_supervisor_to_kernel(struct xregs_state *xstate, u64 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) u64 dynamic_mask = xfeatures_mask_dynamic() & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) u32 lmask, hmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (WARN_ON_FPU(!boot_cpu_has(X86_FEATURE_XSAVES)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (WARN_ON_FPU(!dynamic_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) lmask = dynamic_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) hmask = dynamic_mask >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) /* Should never fault when copying to a kernel buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) WARN_ON_FPU(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * copy_kernel_to_dynamic_supervisor() - Restore dynamic supervisor states from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * an xsave area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * @xstate: A pointer to an xsave area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) * @mask: Represent the dynamic supervisor features restored from the xsave area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * Only the dynamic supervisor states sets in the mask are restored from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) * xsave area (See the comment in XFEATURE_MASK_DYNAMIC for the details of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) * dynamic supervisor feature). Besides the dynamic supervisor states, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) * legacy region and XSAVE header are also restored from the xsave area. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) * supervisor features in the XFEATURE_MASK_SUPERVISOR_SUPPORTED and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) * XFEATURE_MASK_SUPERVISOR_UNSUPPORTED are not restored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * The xsave area must be 64-bytes aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) void copy_kernel_to_dynamic_supervisor(struct xregs_state *xstate, u64 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) u64 dynamic_mask = xfeatures_mask_dynamic() & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) u32 lmask, hmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (WARN_ON_FPU(!boot_cpu_has(X86_FEATURE_XSAVES)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (WARN_ON_FPU(!dynamic_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) lmask = dynamic_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) hmask = dynamic_mask >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) /* Should never fault when copying from a kernel buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) WARN_ON_FPU(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) #ifdef CONFIG_PROC_PID_ARCH_STATUS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * Report the amount of time elapsed in millisecond since last AVX512
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) * use in the task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) static void avx512_status(struct seq_file *m, struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) unsigned long timestamp = READ_ONCE(task->thread.fpu.avx512_timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) long delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (!timestamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) * Report -1 if no AVX512 usage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) delta = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) delta = (long)(jiffies - timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) * Cap to LONG_MAX if time difference > LONG_MAX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) if (delta < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) delta = LONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) delta = jiffies_to_msecs(delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) seq_put_decimal_ll(m, "AVX512_elapsed_ms:\t", delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) seq_putc(m, '\n');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) * Report architecture specific information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) struct pid *pid, struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) * Report AVX512 state if the processor and build option supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (cpu_feature_enabled(X86_FEATURE_AVX512F))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) avx512_status(m, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) #endif /* CONFIG_PROC_PID_ARCH_STATUS */