/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_FP_H
#define __ASM_FP_H

#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>

#ifndef __ASSEMBLY__

#include <linux/bitmap.h>
#include <linux/build_bug.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/types.h>

#ifdef CONFIG_COMPAT
/* Masks for extracting the FPSR and FPCR from the FPSCR */
#define VFP_FPSCR_STAT_MASK	0xf800009f
#define VFP_FPSCR_CTRL_MASK	0x07f79f00
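/*
 * Illustrative only (not part of the kernel API): a 32-bit FPSCR image
 * splits into the AArch64 FPSR/FPCR views as
 *
 *	fpsr = fpscr & VFP_FPSCR_STAT_MASK;	// NZCV, QC, cumulative exceptions
 *	fpcr = fpscr & VFP_FPSCR_CTRL_MASK;	// rounding mode, trap enables, etc.
 */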
/*
 * The VFP state has 32x64-bit registers and a single 32-bit
 * control/status register.
 */
#define VFP_STATE_SIZE		((32 * 8) + 4)
#endif /* CONFIG_COMPAT */

struct task_struct;
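/*
 * Save/restore the FPSIMD register file (V0-V31, FPSR, FPCR) to/from
 * *state.  Implemented in assembly; callers are expected to prevent
 * preemption while the live registers are being accessed.
 */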
extern void fpsimd_save_state(struct user_fpsimd_state *state);
extern void fpsimd_load_state(struct user_fpsimd_state *state);

extern void fpsimd_thread_switch(struct task_struct *next);
extern void fpsimd_flush_thread(void);

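/*
 * Helpers operating on the current task's FP state: the preserve calls
 * flush the live registers out to the task structure (e.g. before building
 * a signal frame), restore loads them back, and update installs a new
 * user_fpsimd_state (e.g. on signal return).
 */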
extern void fpsimd_signal_preserve_current_state(void);
extern void fpsimd_preserve_current_state(void);
extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);

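/*
 * Associate the registers currently loaded on this CPU with a task (or with
 * a raw state buffer, as KVM does for vcpus), so that the lazy-restore
 * logic can tell when a reload is unnecessary.
 */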
extern void fpsimd_bind_task_to_cpu(void);
extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
				     void *sve_state, unsigned int sve_vl);

extern void fpsimd_flush_task_state(struct task_struct *target);
extern void fpsimd_save_and_flush_cpu_state(void);

/*
 * Maximum VL that SVE VL-agnostic software can transparently support:
 * 0x100 bytes == 2048 bits, the architectural maximum vector length.
 */
#define SVE_VL_ARCH_MAX 0x100

/* Offset of FFR in the SVE register dump */
static inline size_t sve_ffr_offset(int vl)
{
	return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET;
}

static inline void *sve_pffr(struct thread_struct *thread)
{
	return (char *)thread->sve_state + sve_ffr_offset(thread->sve_vl);
}
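/*
 * Note: thread->sve_state uses the signal frame layout from
 * <uapi/asm/sigcontext.h> (Z0-Z31, then P0-P15, then FFR), so the FFR
 * pointer returned by sve_pffr() is what sve_save_state()/sve_load_state()
 * below take as their base address.
 */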

extern void sve_save_state(void *state, u32 *pfpsr);
extern void sve_load_state(void const *state, u32 const *pfpsr,
			   unsigned long vq_minus_1);
extern void sve_flush_live(void);
extern void sve_load_from_fpsimd_state(struct user_fpsimd_state const *state,
				       unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);

struct arm64_cpu_capabilities;
extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);

extern u64 read_zcr_features(void);

extern int __ro_after_init sve_max_vl;
extern int __ro_after_init sve_max_virtualisable_vl;
extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);

/*
 * Helpers to translate bit indices in sve_vq_map to VQ values (and
 * vice versa).  This allows find_next_bit() to be used to find the
 * _maximum_ VQ not exceeding a certain value.
 */
static inline unsigned int __vq_to_bit(unsigned int vq)
{
	return SVE_VQ_MAX - vq;
}

static inline unsigned int __bit_to_vq(unsigned int bit)
{
	return SVE_VQ_MAX - bit;
}

/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
static inline bool sve_vq_available(unsigned int vq)
{
	return test_bit(__vq_to_bit(vq), sve_vq_map);
}
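/*
 * Illustrative use of the reversed bit order: the largest supported VQ not
 * exceeding some bound can be found with a single forward bitmap search,
 * e.g. (assuming SVE_VQ_MIN <= vq && vq <= SVE_VQ_MAX):
 *
 *	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX, __vq_to_bit(vq));
 *	if (bit < SVE_VQ_MAX)
 *		max_vq = __bit_to_vq(bit);
 */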

#ifdef CONFIG_ARM64_SVE

extern size_t sve_state_size(struct task_struct const *task);

extern void sve_alloc(struct task_struct *task);
extern void fpsimd_release_task(struct task_struct *task);
extern void fpsimd_sync_to_sve(struct task_struct *task);
extern void sve_sync_to_fpsimd(struct task_struct *task);
extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task);

extern int sve_set_vector_length(struct task_struct *task,
				 unsigned long vl, unsigned long flags);

extern int sve_set_current_vl(unsigned long arg);
extern int sve_get_current_vl(void);

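/*
 * Grant or revoke EL0 access to the SVE instructions by toggling the EL0
 * bit of CPACR_EL1.ZEN: with access disabled, the first SVE instruction
 * executed by userspace traps to EL1, which is how SVE state is allocated
 * and enabled lazily.
 */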
static inline void sve_user_disable(void)
{
	sysreg_clear_set(cpacr_el1, CPACR_EL1_ZEN_EL0EN, 0);
}

static inline void sve_user_enable(void)
{
	sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN);
}

#define sve_cond_update_zcr_vq(val, reg)		\
	do {						\
		u64 __zcr = read_sysreg_s((reg));	\
		u64 __new = __zcr & ~ZCR_ELx_LEN_MASK;	\
		__new |= (val) & ZCR_ELx_LEN_MASK;	\
		if (__zcr != __new)			\
			write_sysreg_s(__new, (reg));	\
	} while (0)
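/*
 * Typical (illustrative) use: program the desired vector length vl while
 * skipping the sysreg write when ZCR_ELx.LEN already holds the right value:
 *
 *	sve_cond_update_zcr_vq(sve_vq_from_vl(vl) - 1, SYS_ZCR_EL1);
 */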

/*
 * Probing and setup functions.
 * Calls to these functions must be serialised with one another.
 */
extern void __init sve_init_vq_map(void);
extern void sve_update_vq_map(void);
extern int sve_verify_vq_map(void);
extern void __init sve_setup(void);

#else /* ! CONFIG_ARM64_SVE */

static inline void sve_alloc(struct task_struct *task) { }
static inline void fpsimd_release_task(struct task_struct *task) { }
static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }

static inline int sve_set_current_vl(unsigned long arg)
{
	return -EINVAL;
}

static inline int sve_get_current_vl(void)
{
	return -EINVAL;
}

static inline void sve_user_disable(void) { BUILD_BUG(); }
static inline void sve_user_enable(void) { BUILD_BUG(); }

static inline void sve_init_vq_map(void) { }
static inline void sve_update_vq_map(void) { }
static inline int sve_verify_vq_map(void) { return 0; }
static inline void sve_setup(void) { }

#endif /* ! CONFIG_ARM64_SVE */

/* For use by EFI runtime services calls only */
extern void __efi_fpsimd_begin(void);
extern void __efi_fpsimd_end(void);
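/*
 * These bracket EFI runtime service calls (illustrative):
 *
 *	__efi_fpsimd_begin();
 *	<firmware call>
 *	__efi_fpsimd_end();
 *
 * saving and restoring any live FP/SIMD (or SVE) state around the call.
 */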

#endif /* ! __ASSEMBLY__ */

#endif /* __ASM_FP_H */