Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Transactional memory support routines to reclaim and recheckpoint
 * transactional process state.
 *
 * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
 */

#include <asm/asm-offsets.h>
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/bug.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

#ifdef CONFIG_VSX
/* See fpu.S, this is borrowed from there */
#define __SAVE_32FPRS_VSRS(n,c,base)		\
BEGIN_FTR_SECTION				\
	b	2f;				\
END_FTR_SECTION_IFSET(CPU_FTR_VSX);		\
	SAVE_32FPRS(n,base);			\
	b	3f;				\
2:	SAVE_32VSRS(n,c,base);			\
3:
#define __REST_32FPRS_VSRS(n,c,base)		\
BEGIN_FTR_SECTION				\
	b	2f;				\
END_FTR_SECTION_IFSET(CPU_FTR_VSX);		\
	REST_32FPRS(n,base);			\
	b	3f;				\
2:	REST_32VSRS(n,c,base);			\
3:
#else
#define __SAVE_32FPRS_VSRS(n,c,base)	SAVE_32FPRS(n, base)
#define __REST_32FPRS_VSRS(n,c,base)	REST_32FPRS(n, base)
#endif
#define SAVE_32FPRS_VSRS(n,c,base) \
	__SAVE_32FPRS_VSRS(n,__REG_##c,__REG_##base)
#define REST_32FPRS_VSRS(n,c,base) \
	__REST_32FPRS_VSRS(n,__REG_##c,__REG_##base)
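/*
 * Note on the wrappers above: the "b 2f" sits in a CPU feature fixup section,
 * so it is live only on CPUs with CPU_FTR_VSX (it is patched out otherwise).
 * VSX parts therefore save/restore the full VSRs via the 2: path, while
 * everything else falls through to the plain FPR forms.  The outer
 * SAVE_32FPRS_VSRS/REST_32FPRS_VSRS macros just paste __REG_ onto the register
 * arguments, which is why the call sites below pass R6/R7/R8 rather than
 * r6/r7/r8, e.g. SAVE_32FPRS_VSRS(0, R6, R7).
 */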

/* Stack frame offsets for local variables. */
#define TM_FRAME_L0	TM_FRAME_SIZE-16
#define TM_FRAME_L1	TM_FRAME_SIZE-8
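/*
 * tm_reclaim below keeps the original MSR in TM_FRAME_L0 and the kernel AMR
 * in TM_FRAME_L1; __tm_recheckpoint reuses TM_FRAME_L0 for the kernel AMR.
 */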


/* In order to access the TM SPRs, TM must be enabled.  So, do so: */
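/*
 * (MSR_TM sits above bit 31 and li only takes a 16-bit signed immediate,
 * hence the li + sldi pairs below that build the mask in the upper word.)
 */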
_GLOBAL(tm_enable)
	mfmsr	r4
	li	r3, MSR_TM >> 32
	sldi	r3, r3, 32
	and.	r0, r4, r3
	bne	1f
	or	r4, r4, r3
	mtmsrd	r4
1:	blr
EXPORT_SYMBOL_GPL(tm_enable);

_GLOBAL(tm_disable)
	mfmsr	r4
	li	r3, MSR_TM >> 32
	sldi	r3, r3, 32
	andc	r4, r4, r3
	mtmsrd	r4
	blr
EXPORT_SYMBOL_GPL(tm_disable);

_GLOBAL(tm_save_sprs)
	mfspr	r0, SPRN_TFHAR
	std	r0, THREAD_TM_TFHAR(r3)
	mfspr	r0, SPRN_TEXASR
	std	r0, THREAD_TM_TEXASR(r3)
	mfspr	r0, SPRN_TFIAR
	std	r0, THREAD_TM_TFIAR(r3)
	blr

_GLOBAL(tm_restore_sprs)
	ld	r0, THREAD_TM_TFHAR(r3)
	mtspr	SPRN_TFHAR, r0
	ld	r0, THREAD_TM_TEXASR(r3)
	mtspr	SPRN_TEXASR, r0
	ld	r0, THREAD_TM_TFIAR(r3)
	mtspr	SPRN_TFIAR, r0
	blr

	/* Passed an 8-bit failure cause as first argument. */
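	/*
	 * TABORT(R3) expands to the tabort. instruction with r3 as its
	 * operand; the low byte of r3 ends up as the failure code recorded
	 * in TEXASR when the transaction aborts.
	 */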
_GLOBAL(tm_abort)
	TABORT(R3)
	blr
EXPORT_SYMBOL_GPL(tm_abort);

/*
 * void tm_reclaim(struct thread_struct *thread,
 *		   uint8_t cause)
 *
 *	- Performs a full reclaim.  This destroys outstanding
 *	  transactions and updates thread.ckpt_regs, thread.ckfp_state and
 *	  thread.ckvr_state with the original checkpointed state.  Note that
 *	  thread->regs is unchanged.
 *
 * Purpose is to both abort transactions of, and preserve the state of,
 * a transaction at a context switch. We preserve/restore both sets of process
 * state to restore them when the thread's scheduled again.  We continue in
 * userland as though nothing happened, but when the transaction is resumed
 * they will abort back to the checkpointed state we save out here.
 *
 * Call with IRQs off, stacks get all out of sync for some periods in here!
 */
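/*
 * Per the prototype above, the thread_struct pointer arrives in r3 and the
 * 8-bit failure cause in r4; r3 is stashed in STK_PARAM straight away, while
 * the cause stays in r4 until the TRECLAIM(R4) below consumes it.
 */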
_GLOBAL(tm_reclaim)
	mfcr	r5
	mflr	r0
	stw	r5, 8(r1)
	std	r0, 16(r1)
	std	r2, STK_GOT(r1)
	stdu	r1, -TM_FRAME_SIZE(r1)

	/* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */

	std	r3, STK_PARAM(R3)(r1)
	SAVE_NVGPRS(r1)

	/*
	 * Save kernel live AMR since it will be clobbered by treclaim
	 * but can be used elsewhere later in kernel space.
	 */
	mfspr	r3, SPRN_AMR
	std	r3, TM_FRAME_L1(r1)

	/* We need to set up MSR for VSX register save instructions. */
	mfmsr	r14
	mr	r15, r14
	ori	r15, r15, MSR_FP
	li	r16, 0
	ori	r16, r16, MSR_EE /* IRQs hard off */
	andc	r15, r15, r16
	oris	r15, r15, MSR_VEC@h
#ifdef CONFIG_VSX
	BEGIN_FTR_SECTION
	oris	r15,r15, MSR_VSX@h
	END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r15
	std	r14, TM_FRAME_L0(r1)

	/* Do sanity check on MSR to make sure we are suspended */
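	/*
	 * (MSR_TS_S)@higher is the suspend bit as seen in the upper 32-bit
	 * word; the saved MSR is shifted down by 32 and ANDed with it, so r6
	 * is non-zero only if we entered in TM suspended state.  tdeqi then
	 * traps when r6 == 0 and EMIT_BUG_ENTRY turns that trap into a BUG.
	 */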
	li	r7, (MSR_TS_S)@higher
	srdi	r6, r14, 32
	and	r6, r6, r7
1:	tdeqi   r6, 0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0

	/* Stash the stack pointer away for use after reclaim */
	std	r1, PACAR1(r13)

	/* Clear MSR RI since we are about to use SCRATCH0, EE is already off */
	li	r5, 0
	mtmsrd	r5, 1

	/*
	 * BE CAREFUL HERE:
	 * At this point we can't take an SLB miss since we have MSR_RI
	 * off. Load only to/from the stack/paca which are in SLB bolted regions
	 * until we turn MSR RI back on.
	 *
	 * The moment we treclaim, ALL of our GPRs will switch
	 * to user register state.  (FPRs, CCR etc. also!)
	 * Use an sprg and a tm_scratch in the PACA to shuffle.
	 */
	TRECLAIM(R4)				/* Cause in r4 */

	/*
	 * ******************** GPRs ********************
	 * Stash the checkpointed r13 in the scratch SPR and get the real paca.
	 */
	SET_SCRATCH0(r13)
	GET_PACA(r13)

	/*
	 * Stash the checkpointed r1 away in paca->tm_scratch and get the real
	 * stack pointer back into r1.
	 */
	std	r1, PACATMSCRATCH(r13)
	ld	r1, PACAR1(r13)

	std	r11, GPR11(r1)			/* Temporary stash */

	/*
	 * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
	 * clobbered by an exception once we turn on MSR_RI below.
	 */
	ld	r11, PACATMSCRATCH(r13)
	std	r11, GPR1(r1)

	/*
	 * Store r13 away so we can free up the scratch SPR for the SLB fault
	 * handler (needed once we start accessing the thread_struct).
	 */
	GET_SCRATCH0(r11)
	std	r11, GPR13(r1)

	/* Reset MSR RI so we can take SLB faults again */
	li	r11, MSR_RI
	mtmsrd	r11, 1

	/* Store the PPR in r11 and reset to decent value */
	mfspr	r11, SPRN_PPR
	HMT_MEDIUM

	/* Now get some more GPRS free */
	std	r7, GPR7(r1)			/* Temporary stash */
	std	r12, GPR12(r1)			/* ''   ''    ''   */
	ld	r12, STK_PARAM(R3)(r1)		/* Param 0, thread_struct * */

	std	r11, THREAD_TM_PPR(r12)		/* Store PPR and free r11 */

	addi	r7, r12, PT_CKPT_REGS		/* Thread's ckpt_regs */

	/*
	 * Make r7 look like an exception frame so that we can use the neat
	 * GPRx(n) macros. r7 is NOT a pt_regs ptr!
	 */
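	/*
	 * The GPRx()/_NIP()/_CCR() offsets already include
	 * STACK_FRAME_OVERHEAD, so backing r7 up by that amount makes
	 * GPRn(r7) resolve to ckpt_regs.gpr[n] even though r7 points into
	 * the thread_struct, not at a real exception frame.
	 */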
	subi	r7, r7, STACK_FRAME_OVERHEAD

	/* Sync the userland GPRs 2-12, 14-31 to thread->regs: */
	SAVE_GPR(0, r7)				/* user r0 */
	SAVE_GPR(2, r7)				/* user r2 */
	SAVE_4GPRS(3, r7)			/* user r3-r6 */
	SAVE_GPR(8, r7)				/* user r8 */
	SAVE_GPR(9, r7)				/* user r9 */
	SAVE_GPR(10, r7)			/* user r10 */
	ld	r3, GPR1(r1)			/* user r1 */
	ld	r4, GPR7(r1)			/* user r7 */
	ld	r5, GPR11(r1)			/* user r11 */
	ld	r6, GPR12(r1)			/* user r12 */
	ld	r8, GPR13(r1)			/* user r13 */
	std	r3, GPR1(r7)
	std	r4, GPR7(r7)
	std	r5, GPR11(r7)
	std	r6, GPR12(r7)
	std	r8, GPR13(r7)

	SAVE_NVGPRS(r7)				/* user r14-r31 */

	/* ******************** NIP ******************** */
	mfspr	r3, SPRN_TFHAR
	std	r3, _NIP(r7)			/* Returns to failhandler */
	/*
	 * The checkpointed NIP is ignored when rescheduling/rechkpting,
	 * but is used in signal return to 'wind back' to the abort handler.
	 */

	/* ***************** CTR, LR, CR, XER ********** */
	mfctr	r3
	mflr	r4
	mfcr	r5
	mfxer	r6

	std	r3, _CTR(r7)
	std	r4, _LINK(r7)
	std	r5, _CCR(r7)
	std	r6, _XER(r7)

	/* ******************** TAR, DSCR ********** */
	mfspr	r3, SPRN_TAR
	mfspr	r4, SPRN_DSCR

	std	r3, THREAD_TM_TAR(r12)
	std	r4, THREAD_TM_DSCR(r12)

	/* ******************** AMR **************** */
	mfspr	r3, SPRN_AMR
	std	r3, THREAD_TM_AMR(r12)

	/*
	 * MSR and flags: We don't change CRs, and we don't need to alter MSR.
	 */


	/*
	 * ******************** FPR/VR/VSRs ************
	 * After reclaiming, capture the checkpointed FPRs/VRs.
	 *
	 * We enabled VEC/FP/VSX in the msr above, so we can execute these
	 * instructions!
	 */
	mr	r3, r12

	/* Altivec (VEC/VMX/VR)*/
	addi	r7, r3, THREAD_CKVRSTATE
	SAVE_32VRS(0, r6, r7)	/* r6 scratch, r7 ckvr_state */
	mfvscr	v0
	li	r6, VRSTATE_VSCR
	stvx	v0, r7, r6

	/* VRSAVE */
	mfspr	r0, SPRN_VRSAVE
	std	r0, THREAD_CKVRSAVE(r3)

	/* Floating Point (FP) */
	addi	r7, r3, THREAD_CKFPSTATE
	SAVE_32FPRS_VSRS(0, R6, R7)	/* r6 scratch, r7 ckfp_state */
	mffs    fr0
	stfd    fr0,FPSTATE_FPSCR(r7)


	/*
	 * TM regs, incl TEXASR -- these live in thread_struct.  Note they've
	 * been updated by the treclaim, to explain to userland the failure
	 * cause (aborted).
	 */
	mfspr	r0, SPRN_TEXASR
	mfspr	r3, SPRN_TFHAR
	mfspr	r4, SPRN_TFIAR
	std	r0, THREAD_TM_TEXASR(r12)
	std	r3, THREAD_TM_TFHAR(r12)
	std	r4, THREAD_TM_TFIAR(r12)

	/* Restore kernel live AMR */
	ld	r8, TM_FRAME_L1(r1)
	mtspr	SPRN_AMR, r8

	/* Restore original MSR/IRQ state & clear TM mode */
	ld	r14, TM_FRAME_L0(r1)		/* Orig MSR */

	li	r15, 0
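	/*
	 * With r15 == 0, the rldimi below writes zeroes over the two MSR[TS]
	 * bits, clearing any transactional/suspended status while leaving
	 * the rest of the original MSR intact before mtmsrd puts it back.
	 */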
	rldimi  r14, r15, MSR_TS_LG, (63-MSR_TS_LG)-1
	mtmsrd  r14

	REST_NVGPRS(r1)

	addi    r1, r1, TM_FRAME_SIZE
	lwz	r4, 8(r1)
	ld	r0, 16(r1)
	mtcr	r4
	mtlr	r0
	ld	r2, STK_GOT(r1)

	/* Load CPU's default DSCR */
	ld	r0, PACA_DSCR_DEFAULT(r13)
	mtspr	SPRN_DSCR, r0

	blr


	/*
	 * void __tm_recheckpoint(struct thread_struct *thread)
	 *	- Restore the checkpointed register state saved by tm_reclaim
	 *	  when we switch_to a process.
	 *
	 *	Call with IRQs off, stacks get all out of sync for
	 *	some periods in here!
	 */
_GLOBAL(__tm_recheckpoint)
	mfcr	r5
	mflr	r0
	stw	r5, 8(r1)
	std	r0, 16(r1)
	std	r2, STK_GOT(r1)
	stdu	r1, -TM_FRAME_SIZE(r1)

	/*
	 * We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD].
	 * This is used for backing up the NVGPRs:
	 */
	SAVE_NVGPRS(r1)

	/*
	 * Save kernel live AMR since it will be clobbered for trechkpt
	 * but can be used elsewhere later in kernel space.
	 */
	mfspr	r8, SPRN_AMR
	std	r8, TM_FRAME_L0(r1)

	/* Load complete register state from ts_ckpt* registers */

	addi	r7, r3, PT_CKPT_REGS		/* Thread's ckpt_regs */

	/*
	 * Make r7 look like an exception frame so that we can use the neat
	 * GPRx(n) macros. r7 is now NOT a pt_regs ptr!
	 */
	subi	r7, r7, STACK_FRAME_OVERHEAD

	/* We need to set up MSR for FP/VMX/VSX register load instructions. */
	mfmsr	r6
	mr	r5, r6
	ori	r5, r5, MSR_FP
#ifdef CONFIG_ALTIVEC
	oris	r5, r5, MSR_VEC@h
#endif
#ifdef CONFIG_VSX
	BEGIN_FTR_SECTION
	oris	r5,r5, MSR_VSX@h
	END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r5

#ifdef CONFIG_ALTIVEC
	/*
	 * FP and VEC registers: These are recheckpointed from
	 * thread.ckfp_state and thread.ckvr_state respectively. The
	 * thread.fp_state[] version holds the 'live' (transactional) state
	 * and will be loaded subsequently by any FPUnavailable trap.
	 */
	addi	r8, r3, THREAD_CKVRSTATE
	li	r5, VRSTATE_VSCR
	lvx	v0, r8, r5
	mtvscr	v0
	REST_32VRS(0, r5, r8)			/* r5 scratch, r8 ptr */
	ld	r5, THREAD_CKVRSAVE(r3)
	mtspr	SPRN_VRSAVE, r5
#endif

	addi	r8, r3, THREAD_CKFPSTATE
	lfd	fr0, FPSTATE_FPSCR(r8)
	MTFSF_L(fr0)
	REST_32FPRS_VSRS(0, R4, R8)

	mtmsr	r6				/* FP/Vec off again! */

restore_gprs:

	/* ****************** CTR, LR, XER ************* */
	ld	r4, _CTR(r7)
	ld	r5, _LINK(r7)
	ld	r8, _XER(r7)

	mtctr	r4
	mtlr	r5
	mtxer	r8

	/* ******************** TAR ******************** */
	ld	r4, THREAD_TM_TAR(r3)
	mtspr	SPRN_TAR,	r4

	/* ******************** AMR ******************** */
	ld	r4, THREAD_TM_AMR(r3)
	mtspr	SPRN_AMR, r4

	/* Load up the PPR and DSCR in GPRs only at this stage */
	ld	r5, THREAD_TM_DSCR(r3)
	ld	r6, THREAD_TM_PPR(r3)

	REST_GPR(0, r7)				/* GPR0 */
	REST_2GPRS(2, r7)			/* GPR2-3 */
	REST_GPR(4, r7)				/* GPR4 */
	REST_4GPRS(8, r7)			/* GPR8-11 */
	REST_2GPRS(12, r7)			/* GPR12-13 */

	REST_NVGPRS(r7)				/* GPR14-31 */

	/* Load up PPR and DSCR here so we don't run with user values for long */
	mtspr	SPRN_DSCR, r5
	mtspr	SPRN_PPR, r6

	/*
	 * Do final sanity check on TEXASR to make sure FS is set. Do this
	 * here before we load up the userspace r1 so any bugs we hit will get
	 * a call chain.
	 */
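	/*
	 * FS (failure summary) should have been set by the earlier treclaim;
	 * recheckpointing with it clear would leave the thread in an invalid
	 * TM state (trechkpt with TEXASR[FS] = 0 is expected to raise a TM
	 * Bad Thing), hence the trap here while we still have a call chain.
	 */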
	mfspr	r5, SPRN_TEXASR
	srdi	r5, r5, 16
	li	r6, (TEXASR_FS)@h
	and	r6, r6, r5
1:	tdeqi	r6, 0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0

	/*
	 * Do final sanity check on MSR to make sure we are not transactional
	 * or suspended.
	 */
	mfmsr   r6
	li	r5, (MSR_TS_MASK)@higher
	srdi	r6, r6, 32
	and	r6, r6, r5
1:	tdnei   r6, 0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0

	/* Restore CR */
	ld	r6, _CCR(r7)
	mtcr    r6

	REST_GPR(6, r7)

	/*
	 * Store r1 and r5 on the stack so that we can access them after we
	 * clear MSR RI.
	 */

	REST_GPR(5, r7)
	std	r5, -8(r1)
	ld	r5, GPR1(r7)
	std	r5, -16(r1)

	REST_GPR(7, r7)

	/* Clear MSR RI since we are about to use SCRATCH0. EE is already off */
	li	r5, 0
	mtmsrd	r5, 1

	/*
	 * BE CAREFUL HERE:
	 * At this point we can't take an SLB miss since we have MSR_RI
	 * off. Load only to/from the stack/paca which are in SLB bolted regions
	 * until we turn MSR RI back on.
	 */

	SET_SCRATCH0(r1)
	ld	r5, -8(r1)
	ld	r1, -16(r1)

	/* Commit register state as checkpointed state: */
	TRECHKPT

	HMT_MEDIUM

	/*
	 * Our transactional state has now changed.
	 *
	 * Now just get out of here.  Transactional (current) state will be
	 * updated once restore is called on the return path in the _switch-ed
	 * -to process.
	 */

	GET_PACA(r13)
	GET_SCRATCH0(r1)

	/* R1 is restored, so we are recoverable again.  EE is still off */
	li	r4, MSR_RI
	mtmsrd	r4, 1

	/* Restore kernel live AMR */
	ld	r8, TM_FRAME_L0(r1)
	mtspr	SPRN_AMR, r8

	REST_NVGPRS(r1)

	addi    r1, r1, TM_FRAME_SIZE
	lwz	r4, 8(r1)
	ld	r0, 16(r1)
	mtcr	r4
	mtlr	r0
	ld	r2, STK_GOT(r1)

	/* Load CPU's default DSCR */
	ld	r0, PACA_DSCR_DEFAULT(r13)
	mtspr	SPRN_DSCR, r0

	blr

	/* ****************************************************************** */