/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * FPU support code, moved here from head.S so that it can be used
 * by chips which use other head-whatever.S files.
 *
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Copyright (C) 1996 Paul Mackerras.
 * Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 */

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

#ifdef CONFIG_VSX
/*
 * With VSX built in, choose between the plain-FP and the VSX forms of
 * the 32-register restore/save at runtime: the feature section branches
 * to the VSX variant (label 2) when CPU_FTR_VSX is set on the running
 * CPU, otherwise falls through to the FPR form and skips over it (label 3).
 * n = first register, c = scratch GPR (VSX form only), base = address GPR.
 */
#define __REST_32FPVSRS(n,c,base)					\
BEGIN_FTR_SECTION							\
	b	2f;							\
END_FTR_SECTION_IFSET(CPU_FTR_VSX);					\
	REST_32FPRS(n,base);						\
	b	3f;							\
2:	REST_32VSRS(n,c,base);						\
3:

#define __SAVE_32FPVSRS(n,c,base)					\
BEGIN_FTR_SECTION							\
	b	2f;							\
END_FTR_SECTION_IFSET(CPU_FTR_VSX);					\
	SAVE_32FPRS(n,base);						\
	b	3f;							\
2:	SAVE_32VSRS(n,c,base);						\
3:
#else
/* No VSX support compiled in: always operate on the FP register file. */
#define __REST_32FPVSRS(n,b,base)	REST_32FPRS(n, base)
#define __SAVE_32FPVSRS(n,b,base)	SAVE_32FPRS(n, base)
#endif
/* Outer wrappers map R-style names (R3, R4, ...) to __REG_ numbers. */
#define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
#define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)

/*
 * Load state from memory into FP registers including FPSCR.
 * Assumes the caller has enabled FP in the MSR.
 *
 * r3 = pointer to the FP state area (FPSTATE_* offsets apply).
 * Clobbers fr0 (used to stage FPSCR) and, via the restore, all FP/VSX regs.
 */
_GLOBAL(load_fp_state)
	lfd	fr0,FPSTATE_FPSCR(r3)	/* stage saved FPSCR in fr0 */
	MTFSF_L(fr0)			/* write all FPSCR fields */
	REST_32FPVSRS(0, R4, R3)	/* then reload all 32 FP/VSX regs */
	blr
EXPORT_SYMBOL(load_fp_state)
_ASM_NOKPROBE_SYMBOL(load_fp_state); /* used by restore_math */

/*
 * Store FP state into memory, including FPSCR
 * Assumes the caller has enabled FP in the MSR.
 *
 * r3 = pointer to the FP state area (FPSTATE_* offsets apply).
 * Note the ordering mirrors load_fp_state: registers first, then FPSCR,
 * since mffs stages FPSCR through fr0 after fr0's value has been saved.
 */
_GLOBAL(store_fp_state)
	SAVE_32FPVSRS(0, R4, R3)	/* save all 32 FP/VSX regs first */
	mffs	fr0			/* then read FPSCR into fr0 */
	stfd	fr0,FPSTATE_FPSCR(r3)
	blr
EXPORT_SYMBOL(store_fp_state)

/*
 * This task wants to use the FPU now.
 * On UP, disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Load up this task's FP registers from its thread_struct,
 * enable the FPU for the current task and return to the task.
 * Note that on 32-bit this can only use registers that will be
 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 */
_GLOBAL(load_up_fpu)
	mfmsr	r5
	ori	r5,r5,MSR_FP		/* let the kernel use FP right now */
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r5,r5,MSR_VSX@h		/* ...and VSX, on VSX-capable CPUs */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	MTMSRD(r5)			/* enable use of fpu now */
	isync				/* context-synchronize the MSR write */
	/* enable use of FP after return */
#ifdef CONFIG_PPC32
	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
#ifdef CONFIG_VMAP_STACK
	tovirt(r5, r5)
#endif
	lwz	r4,THREAD_FPEXC_MODE(r5)
	ori	r9,r9,MSR_FP		/* enable FP for current */
	or	r9,r9,r4		/* merge task's FP exception mode bits */
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	lwz	r4,THREAD_FPEXC_MODE(r5)
	ori	r12,r12,MSR_FP		/* set FP in the return-MSR image */
	or	r12,r12,r4		/* plus the FP exception mode bits */
	std	r12,_MSR(r1)		/* write it back to the regs frame */
#endif
	li	r4,1
	stb	r4,THREAD_LOAD_FP(r5)	/* mark this thread's FP state loaded */
	addi	r10,r5,THREAD_FPSTATE	/* r10 = &thread->fp_state */
	lfd	fr0,FPSTATE_FPSCR(r10)	/* FPSCR first, via fr0... */
	MTFSF_L(fr0)
	REST_32FPVSRS(0, R4, R10)	/* ...then all 32 FP/VSX registers */
	/* restore registers and return */
	/* we haven't used ctr or xer or lr */
	blr
_ASM_NOKPROBE_SYMBOL(load_up_fpu)

/*
 * save_fpu(tsk)
 * Save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 *
 * r3 = task_struct pointer on entry; becomes &tsk->thread below.
 * Saves into THREAD_FPSAVEAREA if the thread has one set, otherwise
 * into the thread's own THREAD_FPSTATE.
 */
_GLOBAL(save_fpu)
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r6,THREAD_FPSAVEAREA(r3)
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r6,0
	bne	2f			/* alternate save area is set: use it */
	addi	r6,r3,THREAD_FPSTATE	/* else use thread's own fp_state */
2:	SAVE_32FPVSRS(0, R4, R6)	/* all 32 FP/VSX registers... */
	mffs	fr0
	stfd	fr0,FPSTATE_FPSCR(r6)	/* ...then FPSCR, staged through fr0 */
	blr