// SPDX-License-Identifier: GPL-2.0-only
/*
 * x86 FPU boot time init code:
 */
#include <asm/fpu/internal.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>

#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/init.h>

/*
 * Initialize the registers found in all CPUs, CR0 and CR4:
 */
static void fpu__init_cpu_generic(void)
{
	unsigned long cr0;
	unsigned long cr4_mask = 0;

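	/*
	 * CR4.OSFXSR enables the FXSAVE/FXRSTOR instructions and SSE;
	 * CR4.OSXMMEXCPT delivers unmasked SSE exceptions as #XM
	 * instead of #UD:
	 */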
	if (boot_cpu_has(X86_FEATURE_FXSR))
		cr4_mask |= X86_CR4_OSFXSR;
	if (boot_cpu_has(X86_FEATURE_XMM))
		cr4_mask |= X86_CR4_OSXMMEXCPT;
	if (cr4_mask)
		cr4_set_bits(cr4_mask);

	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
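	/*
	 * If there is no FPU, set CR0.EM so that x87 instructions
	 * raise #NM, which the math emulator (when configured) handles:
	 */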
	if (!boot_cpu_has(X86_FEATURE_FPU))
		cr0 |= X86_CR0_EM;
	write_cr0(cr0);

	/* Flush out any pending x87 state: */
#ifdef CONFIG_MATH_EMULATION
	if (!boot_cpu_has(X86_FEATURE_FPU))
		fpstate_init_soft(&current->thread.fpu.state.soft);
	else
#endif
		asm volatile ("fninit");
}

/*
 * Enable all supported FPU features. Called when a CPU is brought online:
 */
void fpu__init_cpu(void)
{
	fpu__init_cpu_generic();
	fpu__init_cpu_xstate();
}

static bool fpu__probe_without_cpuid(void)
{
	unsigned long cr0;
	u16 fsw, fcw;

	fsw = fcw = 0xffff;

	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
	write_cr0(cr0);

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1" : "+m" (fsw), "+m" (fcw));

	pr_info("x86/fpu: Probing for FPU: FSW=0x%04hx FCW=0x%04hx\n", fsw, fcw);

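	/*
	 * After FNINIT a real x87 FPU reports FSW == 0 and FCW == 0x037f:
	 * all six exception-mask bits (0x3f) set and bit 12 clear. If no
	 * FPU is present, the FNSTSW/FNSTCW stores never happen and both
	 * variables keep their 0xffff preset:
	 */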
	return fsw == 0 && (fcw & 0x103f) == 0x003f;
}

static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
{
	if (!boot_cpu_has(X86_FEATURE_CPUID) &&
	    !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
		if (fpu__probe_without_cpuid())
			setup_force_cpu_cap(X86_FEATURE_FPU);
		else
			setup_clear_cpu_cap(X86_FEATURE_FPU);
	}

#ifndef CONFIG_MATH_EMULATION
	if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_FPU)) {
		pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n");
		for (;;)
			asm volatile("hlt");
	}
#endif
}

/*
 * Boot time FPU feature detection code:
 */
unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
EXPORT_SYMBOL_GPL(mxcsr_feature_mask);

static void __init fpu__init_system_mxcsr(void)
{
	unsigned int mask = 0;

	if (boot_cpu_has(X86_FEATURE_FXSR)) {
		/* Static because GCC does not get 16-byte stack alignment right: */
		static struct fxregs_state fxregs __initdata;

		asm volatile("fxsave %0" : "+m" (fxregs));

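		/*
		 * FXSAVE stores the MXCSR feature mask at byte offset 28
		 * of its image; CPUs that predate the mask definition
		 * store zero there:
		 */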
		mask = fxregs.mxcsr_mask;

		/*
		 * If zero then use the default features mask,
		 * which has all features set, except the
		 * denormals-are-zero feature bit:
		 */
		if (mask == 0)
			mask = 0x0000ffbf;
	}
	mxcsr_feature_mask &= mask;
}

/*
 * Once per bootup FPU initialization sequences that will run on most x86 CPUs:
 */
static void __init fpu__init_system_generic(void)
{
	/*
	 * Set up the legacy init FPU context. (xstate init might overwrite this
	 * with a more modern format, if the CPU supports it.)
	 */
	fpstate_init(&init_fpstate);

	fpu__init_system_mxcsr();
}

/*
 * Size of the FPU context state. All tasks in the system use the
 * same context size, regardless of what portion they use.
 * This is inherent to the XSAVE architecture which puts all state
 * components into a single, contiguous memory block:
 */
unsigned int fpu_kernel_xstate_size;
EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size);

/* Get alignment of the TYPE. */
#define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)
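
/*
 * For example, with TYPE == int the anonymous struct is
 * { char x; int test; }: the compiler pads 'test' out to its natural
 * alignment, so offsetof() yields 4 on x86 - the same value C11's
 * _Alignof(int) would give.
 */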

/*
 * Enforce that 'MEMBER' is the last field of 'TYPE'.
 *
 * Align the computed size with alignment of the TYPE,
 * because that's how C aligns structs.
 */
#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
	BUILD_BUG_ON(sizeof(TYPE) != ALIGN(offsetofend(TYPE, MEMBER), \
					   TYPE_ALIGN(TYPE)))

/*
 * We append the 'struct fpu' to the task_struct:
 */
static void __init fpu__init_task_struct_size(void)
{
	int task_size = sizeof(struct task_struct);

	/*
	 * Subtract off the static size of the register state.
	 * It potentially has a bunch of padding.
	 */
	task_size -= sizeof(((struct task_struct *)0)->thread.fpu.state);

	/*
	 * Add back the dynamically-calculated register state
	 * size.
	 */
	task_size += fpu_kernel_xstate_size;

	/*
	 * We dynamically size 'struct fpu', so we require that
	 * it be at the end of 'thread_struct' and that
	 * 'thread_struct' be at the end of 'task_struct'. If
	 * you hit a compile error here, check the structure to
	 * see if something got added to the end.
	 */
	CHECK_MEMBER_AT_END_OF(struct fpu, state);
	CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu);
	CHECK_MEMBER_AT_END_OF(struct task_struct, thread);

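	/*
	 * The generic fork code sizes the task_struct slab cache from
	 * arch_task_struct_size, so every task allocation includes the
	 * dynamically sized FPU state:
	 */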
	arch_task_struct_size = task_size;
}

/*
 * Set up the user and kernel xstate sizes based on the legacy FPU context size.
 *
 * We set this up first, and later it will be overwritten by
 * fpu__init_system_xstate() if the CPU knows about xstates.
 */
static void __init fpu__init_system_xstate_size_legacy(void)
{
	static bool on_boot_cpu __initdata = true;

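	/* Warn if this runs more than once: */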
	WARN_ON_FPU(!on_boot_cpu);
	on_boot_cpu = false;

	/*
	 * Note that xstate sizes might be overwritten later during
	 * fpu__init_system_xstate().
	 */

	if (!boot_cpu_has(X86_FEATURE_FPU)) {
		fpu_kernel_xstate_size = sizeof(struct swregs_state);
	} else {
		if (boot_cpu_has(X86_FEATURE_FXSR))
			fpu_kernel_xstate_size =
				sizeof(struct fxregs_state);
		else
			fpu_kernel_xstate_size =
				sizeof(struct fregs_state);
	}

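	/*
	 * Without XSAVE there are no supervisor-only state components,
	 * so the format exposed to user-space signal frames is the same
	 * as the kernel buffer format:
	 */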
	fpu_user_xstate_size = fpu_kernel_xstate_size;
}

/*
 * Find supported xfeatures based on cpu features and command-line input.
 * This must be called after fpu__init_parse_early_param() is called and
 * xfeatures_mask is enumerated.
 */
u64 __init fpu__get_supported_xfeatures_mask(void)
{
	return XFEATURE_MASK_USER_SUPPORTED |
	       XFEATURE_MASK_SUPERVISOR_SUPPORTED;
}

/* Legacy code to initialize eager fpu mode. */
static void __init fpu__init_system_ctx_switch(void)
{
	static bool on_boot_cpu __initdata = true;

	WARN_ON_FPU(!on_boot_cpu);
	on_boot_cpu = false;
}

/*
 * Called on the boot CPU once per system bootup, to set up the initial
 * FPU state that is later cloned into all processes:
 */
void __init fpu__init_system(struct cpuinfo_x86 *c)
{
	fpu__init_system_early_generic(c);

	/*
	 * The FPU has to be operational for some of the
	 * later FPU init activities:
	 */
	fpu__init_cpu();

	fpu__init_system_generic();
	fpu__init_system_xstate_size_legacy();
	fpu__init_system_xstate();
	fpu__init_task_struct_size();

	fpu__init_system_ctx_switch();
}