Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  *  PowerPC version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *  Adapted for Power Macintosh by Paul Mackerras.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *  Low-level exception handlers and MMU support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  *  rewritten by Paul Mackerras.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  *    Copyright (C) 1996 Paul Mackerras.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  *  This file contains the system call entry code, context switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  *  code, and exception/interrupt return code for PowerPC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/sys.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/threads.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <asm/reg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <asm/mmu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <asm/cputable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <asm/thread_info.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <asm/ppc_asm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <asm/asm-offsets.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <asm/unistd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <asm/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <asm/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <asm/feature-fixups.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <asm/barrier.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <asm/kup.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <asm/bug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include "head_32.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39)  * powerpc relies on return from interrupt/syscall being context synchronising
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40)  * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41)  * synchronisation instructions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45)  * Align to 4k in order to ensure that all functions modifying srr0/srr1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46)  * fit into one page in order to not encounter a TLB miss between the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47)  * modification of srr0/srr1 and the associated rfi.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) 	.align	12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #ifdef CONFIG_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 	.globl	mcheck_transfer_to_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) mcheck_transfer_to_handler:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 	mfspr	r0,SPRN_DSRR0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 	stw	r0,_DSRR0(r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 	mfspr	r0,SPRN_DSRR1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 	stw	r0,_DSRR1(r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 	/* fall through */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) _ASM_NOKPROBE_SYMBOL(mcheck_transfer_to_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 
	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	/*
	 * Debug exception entry (BookE).  Save the critical save/restore
	 * registers CSRR0/CSRR1 into the _CSRR0/_CSRR1 slots of the
	 * exception frame (r11), then fall through to
	 * crit_transfer_to_handler below.
	 */
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */
_ASM_NOKPROBE_SYMBOL(debug_transfer_to_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	/*
	 * Critical exception entry (BookE).  Save all state that a nested
	 * non-critical exception could clobber — the TLB-miss assist (MAS)
	 * registers, SRR0/SRR1 and the kernel stack limit — into the
	 * exception frame (r11), then fall through to transfer_to_handler.
	 */
#ifdef CONFIG_PPC_BOOK3E_MMU
	/* A nested TLB miss would overwrite the MAS registers; stash them. */
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7		/* upper physical-address bits */
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	/* SRR0/SRR1 would be overwritten by a nested non-critical exception. */
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	/* set the stack limit to the current stack */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)	/* old limit, restored on exit */
	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)	/* r0 = r1 rounded down to THREAD_SIZE */
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) #ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	/*
	 * Critical exception entry (40x).  r10/r11 were presumably stashed
	 * in the fixed low-memory save slots crit_r10/crit_r11 by the
	 * exception vector code (hence the absolute @l(0) addressing —
	 * TODO confirm against the 40x head code); copy them into the
	 * exception frame, preserve SRR0/SRR1 and the stack limit in their
	 * own fixed slots, then fall through to transfer_to_handler.
	 */
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* SRR0/SRR1 would be clobbered by a nested non-critical exception. */
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	/* set the stack limit to the current stack */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)	/* old limit, restored on exit */
	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)	/* r0 = r1 rounded down to THREAD_SIZE */
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130)  * This code finishes saving the registers to the exception frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131)  * and jumps to the appropriate handler for the exception, turning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132)  * on address translation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133)  * Note that we rely on the caller having set cr0.eq iff the exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134)  * occurred in kernel mode (i.e. MSR:PR = 0).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135)  */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	/*
	 * Full-frame variant of transfer_to_handler: additionally save the
	 * non-volatile GPRs into the exception frame at r11, then fall
	 * through to transfer_to_handler below.
	 */
	SAVE_NVGPRS(r11)
_ASM_NOKPROBE_SYMBOL(transfer_to_handler_full)
	/* fall through */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 
	.globl	transfer_to_handler
transfer_to_handler:
	/*
	 * Finish building the exception frame and jump to the handler.
	 * On entry (set up by the exception prologue, see head_32.h):
	 *   r11 = exception frame (pt_regs)
	 *   r12 = saved NIP, r9 = saved MSR
	 *   r10 = MSR value to enter the handler with
	 * The caller has set cr0.eq iff the exception occurred in kernel
	 * mode (MSR:PR = 0); the andi. below re-derives it from r9.
	 */
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR		/* cr0.eq = exception came from kernel */
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD	/* r12 = current thread_struct */
	tovirt_vmstack r12, r12
	beq	2f			/* from kernel: NAP/stack-overflow checks */
	/* From user: make THREAD.regs point at this exception frame. */
	addi	r2, r12, -THREAD	/* r2 = current task_struct */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h	/* cr0.eq = not being debugged */
#endif
	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_lock r11, r12
#endif
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	beq+	3f			/* DBCR0_IDM clear: no debug setup */
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	lwz	r9,TASK_CPU(r2)
	slwi	r9,r9,3			/* 8 bytes of global_dbcr0[] per CPU */
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)		/* word 0: dbcr0 value to load */
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)		/* word 1: use count */
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif

	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
         * check for stack overflow
         */
	kuap_save_and_lock r11, r12, r9, r2, r6
	addi	r2, r12, -THREAD	/* r2 = current task_struct */
#ifndef CONFIG_VMAP_STACK
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
#endif
5:
#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
	/* Was the kernel in a power-saving state when interrupted? */
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12		/* low TLF_* bits -> CR for bt below */
	bt-	31-TLF_NAPPING,4f	/* interrupted a nap: resume it */
	bt-	31-TLF_SLEEPING,7f	/* interrupted sleep: return to caller */
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	/*
	 * LR points at a two-word table (handler address, return address)
	 * placed after the caller's bl — presumably by the EXC_XFER macros
	 * in head_32.h; confirm against the vector code.
	 */
	mflr	r9
	tovirt_novmstack r2, r2 	/* set r2 to current */
	tovirt_vmstack r9, r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * When tracing IRQ state (lockdep) we enable the MMU before we call
	 * the IRQ tracing functions as they might access vmalloc space or
	 * perform IOs for console output.
	 *
	 * To speed up the syscall path where interrupts stay on, let's check
	 * first if we are changing the MSR value at all.
	 */
	tophys_novmstack r12, r1
	lwz	r12,_MSR(r12)
	andi.	r12,r12,MSR_EE
	bne	1f

	/* MSR isn't changing, just transition directly */
#endif
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9			/* handler returns via LR */
	RFI				/* jump to handler, enable MMU */

#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING	/* clear the NAPPING flag */
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING	/* clear the SLEEPING flag */
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	kuap_restore r11, r2, r3, r4, r5
	lwz	r2, GPR2(r11)
	b	fast_exception_return
#endif
_ASM_NOKPROBE_SYMBOL(transfer_to_handler)
_ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 
#ifdef CONFIG_TRACE_IRQFLAGS
1:	/* MSR is changing, re-enable MMU so we can notify lockdep. We need to
	 * keep interrupts disabled at this point otherwise we might risk
	 * taking an interrupt before we tell lockdep they are enabled.
	 */
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)	/* MMU on, interrupts still off */
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r0
	RFI					/* "return" into reenable_mmu */

reenable_mmu:
	/*
	 * We save a bunch of GPRs,
	 * r3 can be different from GPR3(r1) at this point, r9 and r11
	 * contain the return address and handler address respectively
	 * (loaded at transfer_to_handler_cont above),
	 * r4 & r5 can contain page fault arguments that need to be passed
	 * along as well. r0, r6-r8, r12, CCR, CTR, XER etc... are left
	 * clobbered as they aren't useful past this point.
	 */

	stwu	r1,-32(r1)		/* small scratch frame across the call */
	stw	r9,8(r1)		/* return address */
	stw	r11,12(r1)		/* handler address */
	stw	r3,16(r1)
	stw	r4,20(r1)
	stw	r5,24(r1)

	/* If we are disabling interrupts (normal case), simply log it with
	 * lockdep
	 */
1:	bl	trace_hardirqs_off
	/* Reload everything the call may have clobbered and drop the frame. */
	lwz	r5,24(r1)
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	mtctr	r11
	mtlr	r9			/* handler returns via LR as usual */
	bctr				/* jump to handler */
#endif /* CONFIG_TRACE_IRQFLAGS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) #ifndef CONFIG_VMAP_STACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302)  * On kernel stack overflow, load up an initial stack pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303)  * and call StackOverflow(regs), which should not return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304)  */
stack_ovf:
	/*
	 * Kernel stack overflow path (r1 fell below KSP_LIMIT).  Switch to
	 * the init task's stack and call StackOverflow(regs), which should
	 * not return.
	 */
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD	/* r3 = regs, argument to StackOverflow */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD	/* top of init stack */
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	RFI				/* enter StackOverflow with the MMU on */
_ASM_NOKPROBE_SYMBOL(stack_ovf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) #ifdef CONFIG_TRACE_IRQFLAGS
trace_syscall_entry_irq_off:
	/*
	 * Syscall shouldn't happen while interrupts are disabled,
	 * so let's do a warning here.
	 * Then tell lockdep interrupts are on, enable them for real,
	 * restore the syscall argument registers (r0, r3-r8) that the
	 * calls may have clobbered, and join the normal syscall path.
	 */
0:	trap				/* WARN (see bug entry below) */
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
	bl	trace_hardirqs_on

	/* Now enable for real */
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
	mtmsr	r10

	REST_GPR(0, r1)			/* r0 = syscall number */
	REST_4GPRS(3, r1)		/* r3-r6 = syscall args */
	REST_2GPRS(7, r1)		/* r7-r8 = syscall args */
	b	DoSyscall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) #endif /* CONFIG_TRACE_IRQFLAGS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 
	.globl	transfer_to_syscall
transfer_to_syscall:
	/*
	 * System call entry, reached from the syscall exception prologue.
	 * r9 holds the MSR value at syscall time; if interrupts were off
	 * (unexpected), divert to the warning path above.  Otherwise fall
	 * through into DoSyscall below.
	 */
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_lock r11, r12
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	andi.	r12,r9,MSR_EE
	beq-	trace_syscall_entry_irq_off
#endif /* CONFIG_TRACE_IRQFLAGS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359)  * Handle a system call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 	.stabs	"entry_32.S",N_SO,0,0,0f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) _GLOBAL(DoSyscall)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 	stw	r3,ORIG_GPR3(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	li	r12,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	stw	r12,RESULT(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) #ifdef CONFIG_TRACE_IRQFLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 	/* Make sure interrupts are enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 	mfmsr	r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	andi.	r12,r11,MSR_EE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	/* We came in with interrupts disabled, we WARN and mark them enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	 * for lockdep now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 0:	tweqi	r12, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) #endif /* CONFIG_TRACE_IRQFLAGS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	lwz	r11,TI_FLAGS(r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	bne-	syscall_dotrace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) syscall_dotrace_cont:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 	cmplwi	0,r0,NR_syscalls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 	lis	r10,sys_call_table@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	ori	r10,r10,sys_call_table@l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	slwi	r0,r0,2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	bge-	66f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	barrier_nospec_asm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 	 * Prevent the load of the handler below (based on the user-passed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	 * system call number) being speculatively executed until the test
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	 * against NR_syscalls and branch to .66f above has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	 * committed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	mtlr	r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 	addi	r9,r1,STACK_FRAME_OVERHEAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	PPC440EP_ERR42
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 	blrl			/* Call handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	.globl	ret_from_syscall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) ret_from_syscall:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) #ifdef CONFIG_DEBUG_RSEQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	/* Check whether the syscall is issued inside a restartable sequence */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	stw	r3,GPR3(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	addi    r3,r1,STACK_FRAME_OVERHEAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	bl      rseq_syscall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	lwz	r3,GPR3(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	mr	r6,r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	/* disable interrupts so current_thread_info()->flags can't change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	/* Note: We don't bother telling lockdep about it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	mtmsr	r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	lwz	r9,TI_FLAGS(r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	li	r8,-MAX_ERRNO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	bne-	syscall_exit_work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	cmplw	0,r3,r8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	blt+	syscall_exit_cont
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	lwz	r11,_CCR(r1)			/* Load CR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	neg	r3,r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	oris	r11,r11,0x1000	/* Set SO bit in CR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 	stw	r11,_CCR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) syscall_exit_cont:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	lwz	r8,_MSR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) #ifdef CONFIG_TRACE_IRQFLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	/* If we are going to return from the syscall with interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	 * off, we trace that here. It shouldn't normally happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	andi.	r10,r8,MSR_EE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	bne+	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	stw	r3,GPR3(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	bl      trace_hardirqs_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	lwz	r3,GPR3(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) #endif /* CONFIG_TRACE_IRQFLAGS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	/* If the process has its own DBCR0 value, load it up.  The internal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 	   debug mode bit tells us that dbcr0 should be loaded. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	lwz	r0,THREAD+THREAD_DBCR0(r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	andis.	r10,r0,DBCR0_IDM@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 	bnel-	load_dbcr0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) #ifdef CONFIG_44x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) BEGIN_MMU_FTR_SECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	lis	r4,icache_44x_need_flush@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	lwz	r5,icache_44x_need_flush@l(r4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	cmplwi	cr0,r5,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	bne-	2f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) #endif /* CONFIG_44x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) BEGIN_FTR_SECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	lwarx	r7,0,r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	stwcx.	r0,0,r1			/* to clear the reservation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) #ifdef CONFIG_PPC_BOOK3S_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	kuep_unlock r5, r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	kuap_check r2, r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	lwz	r4,_LINK(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	lwz	r5,_CCR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	mtlr	r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	mtcr	r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	lwz	r7,_NIP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	lwz	r2,GPR2(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	lwz	r1,GPR1(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) syscall_exit_finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	mtspr	SPRN_NRI, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	mtspr	SPRN_SRR0,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	mtspr	SPRN_SRR1,r8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	RFI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) _ASM_NOKPROBE_SYMBOL(syscall_exit_finish)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) #ifdef CONFIG_44x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 2:	li	r7,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	iccci	r0,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	stw	r7,icache_44x_need_flush@l(r4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	b	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) #endif  /* CONFIG_44x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 66:	li	r3,-ENOSYS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	b	ret_from_syscall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	.globl	ret_from_fork
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) ret_from_fork:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	REST_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	bl	schedule_tail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	li	r3,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	b	ret_from_syscall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	.globl	ret_from_kernel_thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) ret_from_kernel_thread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	REST_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	bl	schedule_tail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	mtlr	r14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	mr	r3,r15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	PPC440EP_ERR42
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	blrl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	li	r3,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	b	ret_from_syscall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) /* Traced system call support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) syscall_dotrace:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	SAVE_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	li	r0,0xc00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	stw	r0,_TRAP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	addi	r3,r1,STACK_FRAME_OVERHEAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	bl	do_syscall_trace_enter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	 * Restore argument registers possibly just changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	 * We use the return value of do_syscall_trace_enter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	 * for call number to look up in the table (r0).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	mr	r0,r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	lwz	r3,GPR3(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	lwz	r4,GPR4(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	lwz	r5,GPR5(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	lwz	r6,GPR6(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	lwz	r7,GPR7(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	lwz	r8,GPR8(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	REST_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	cmplwi	r0,NR_syscalls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	bge-	ret_from_syscall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	b	syscall_dotrace_cont
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) syscall_exit_work:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	andi.	r0,r9,_TIF_RESTOREALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	beq+	0f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	REST_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	b	2f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 0:	cmplw	0,r3,r8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	blt+	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	andi.	r0,r9,_TIF_NOERROR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	bne-	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	lwz	r11,_CCR(r1)			/* Load CR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	neg	r3,r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	oris	r11,r11,0x1000	/* Set SO bit in CR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	stw	r11,_CCR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 1:	stw	r6,RESULT(r1)	/* Save result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	stw	r3,GPR3(r1)	/* Update return value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	beq	4f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	/* Clear per-syscall TIF flags if any are set.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	li	r11,_TIF_PERSYSCALL_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	addi	r12,r2,TI_FLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 3:	lwarx	r8,0,r12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	andc	r8,r8,r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	stwcx.	r8,0,r12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	bne-	3b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 4:	/* Anything which requires enabling interrupts? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	beq	ret_from_except
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	/* Re-enable interrupts. There is no need to trace that with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	 * lockdep as we are supposed to have IRQs on at this point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	ori	r10,r10,MSR_EE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	mtmsr	r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	/* Save NVGPRS if they're not saved already */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	lwz	r4,_TRAP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	andi.	r4,r4,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	beq	5f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	SAVE_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	li	r4,0xc00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	stw	r4,_TRAP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	addi	r3,r1,STACK_FRAME_OVERHEAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	bl	do_syscall_trace_leave
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	b	ret_from_except_full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	 * System call was called from kernel. We get here with SRR1 in r9.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	 * Mark the exception as recoverable once we have retrieved SRR0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	 * trap a warning and return ENOSYS with CR[SO] set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	.globl	ret_from_kernel_syscall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) ret_from_kernel_syscall:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	mfspr	r9, SPRN_SRR0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	mfspr	r10, SPRN_SRR1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) #if !defined(CONFIG_4xx) && !defined(CONFIG_BOOKE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_IR|MSR_DR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	mtmsr	r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 0:	trap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	li	r3, ENOSYS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	crset	so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	mtspr	SPRN_NRI, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	mtspr	SPRN_SRR0, r9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	mtspr	SPRN_SRR1, r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	RFI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) _ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610)  * The fork/clone functions need to copy the full register set into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611)  * the child process. Therefore we need to save all the nonvolatile
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612)  * registers (r13 - r31) before calling the C code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	.globl	ppc_fork
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) ppc_fork:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	SAVE_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	lwz	r0,_TRAP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	stw	r0,_TRAP(r1)		/* register set saved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	b	sys_fork
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	.globl	ppc_vfork
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) ppc_vfork:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	SAVE_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	lwz	r0,_TRAP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	stw	r0,_TRAP(r1)		/* register set saved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	b	sys_vfork
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	.globl	ppc_clone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) ppc_clone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	SAVE_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	lwz	r0,_TRAP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	stw	r0,_TRAP(r1)		/* register set saved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	b	sys_clone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	.globl	ppc_clone3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) ppc_clone3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	SAVE_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	lwz	r0,_TRAP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	stw	r0,_TRAP(r1)		/* register set saved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	b	sys_clone3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	.globl	ppc_swapcontext
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) ppc_swapcontext:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	SAVE_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	lwz	r0,_TRAP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	stw	r0,_TRAP(r1)		/* register set saved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	b	sys_swapcontext
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655)  * Top-level page fault handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656)  * This is in assembler because if do_page_fault tells us that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657)  * it is a bad kernel page fault, we want to save the non-volatile
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658)  * registers before calling bad_page_fault.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	.globl	handle_page_fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) handle_page_fault:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	addi	r3,r1,STACK_FRAME_OVERHEAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) #ifdef CONFIG_PPC_BOOK3S_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	andis.  r0,r5,DSISR_DABRMATCH@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	bne-    handle_dabr_fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	bl	do_page_fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	cmpwi	r3,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	beq+	ret_from_except
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	SAVE_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	lwz	r0,_TRAP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	clrrwi	r0,r0,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	stw	r0,_TRAP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	mr	r5,r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	addi	r3,r1,STACK_FRAME_OVERHEAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	lwz	r4,_DAR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	bl	bad_page_fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	b	ret_from_except_full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) #ifdef CONFIG_PPC_BOOK3S_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	/* We have a data breakpoint exception - handle it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) handle_dabr_fault:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	SAVE_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	lwz	r0,_TRAP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	clrrwi	r0,r0,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	stw	r0,_TRAP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	bl      do_break
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	b	ret_from_except_full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692)  * This routine switches between two different tasks.  The process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693)  * state of one is saved on its kernel stack.  Then the state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694)  * of the other is restored from its kernel stack.  The memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695)  * management hardware is updated to the second process's state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696)  * Finally, we can return to the second process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697)  * On entry, r3 points to the THREAD for the current task, r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698)  * points to the THREAD for the new task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700)  * This routine is always called with interrupts disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702)  * Note: there are two ways to get to the "going out" portion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703)  * of this code; either by coming in via the entry (_switch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704)  * or via "fork" which must set up an environment equivalent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705)  * to the "_switch" path.  If you change this , you'll have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706)  * change the fork code also.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708)  * The code which creates the new task context is in 'copy_thread'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709)  * in arch/ppc/kernel/process.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) _GLOBAL(_switch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	stwu	r1,-INT_FRAME_SIZE(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	mflr	r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	stw	r0,INT_FRAME_SIZE+4(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	/* r3-r12 are caller saved -- Cort */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	SAVE_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	stw	r0,_NIP(r1)	/* Return to switch caller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	mfmsr	r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	li	r0,MSR_FP	/* Disable floating-point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) #ifdef CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) BEGIN_FTR_SECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	stw	r12,THREAD+THREAD_VRSAVE(r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) #endif /* CONFIG_ALTIVEC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) #ifdef CONFIG_SPE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) BEGIN_FTR_SECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	stw	r12,THREAD+THREAD_SPEFSCR(r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) END_FTR_SECTION_IFSET(CPU_FTR_SPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) #endif /* CONFIG_SPE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	beq+	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	andc	r11,r11,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	mtmsr	r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	isync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 1:	stw	r11,_MSR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	mfcr	r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	stw	r10,_CCR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	stw	r1,KSP(r3)	/* Set old stack pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	kuap_check r2, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	/* We need a sync somewhere here to make sure that if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	 * previous task gets rescheduled on another CPU, it sees all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	 * stores it has performed on this one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	tophys(r0,r4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	lwz	r1,KSP(r4)	/* Load new stack pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	/* save the old current 'last' for return value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	mr	r3,r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	addi	r2,r4,-THREAD	/* Update current */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) #ifdef CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) BEGIN_FTR_SECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	lwz	r0,THREAD+THREAD_VRSAVE(r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) #endif /* CONFIG_ALTIVEC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) #ifdef CONFIG_SPE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) BEGIN_FTR_SECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) END_FTR_SECTION_IFSET(CPU_FTR_SPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) #endif /* CONFIG_SPE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	lwz	r0,_CCR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	mtcrf	0xFF,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	/* r3-r12 are destroyed -- Cort */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	REST_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	mtlr	r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	addi	r1,r1,INT_FRAME_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	blr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	.globl	fast_exception_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) fast_exception_return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	beq	1f			/* if not, we've got problems */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 2:	REST_4GPRS(3, r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	lwz	r10,_CCR(r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	REST_GPR(1, r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	mtcr	r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	lwz	r10,_LINK(r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	mtlr	r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	li	r10, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	stw	r10, 8(r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	REST_GPR(10, r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	mtspr	SPRN_NRI, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	mtspr	SPRN_SRR1,r9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	mtspr	SPRN_SRR0,r12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	REST_GPR(9, r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	REST_GPR(12, r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	lwz	r11,GPR11(r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	RFI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) _ASM_NOKPROBE_SYMBOL(fast_exception_return)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) /* check if the exception happened in a restartable section */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 1:	lis	r3,exc_exit_restart_end@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	addi	r3,r3,exc_exit_restart_end@l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	cmplw	r12,r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	bge	3f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	lis	r4,exc_exit_restart@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	addi	r4,r4,exc_exit_restart@l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	cmplw	r12,r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	blt	3f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	lis	r3,fee_restarts@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	tophys(r3,r3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	lwz	r5,fee_restarts@l(r3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	addi	r5,r5,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	stw	r5,fee_restarts@l(r3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	mr	r12,r4		/* restart at exc_exit_restart */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	b	2b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	.section .bss
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	.align	2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) fee_restarts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	.space	4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	.previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) /* aargh, a nonrecoverable interrupt, panic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) /* aargh, we don't know which trap this is */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	li	r10,-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	stw	r10,_TRAP(r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	addi	r3,r1,STACK_FRAME_OVERHEAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	lis	r10,MSR_KERNEL@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	ori	r10,r10,MSR_KERNEL@l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	bl	transfer_to_handler_full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	.long	unrecoverable_exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	.long	ret_from_except
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	.globl	ret_from_except_full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) ret_from_except_full:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	REST_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	/* fall through */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	.globl	ret_from_except
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) ret_from_except:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	/* Hard-disable interrupts so that current_thread_info()->flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	 * can't change between when we test it and when we return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	 * from the interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	/* Note: We don't bother telling lockdep about it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	mtmsr	r10		/* disable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	lwz	r3,_MSR(r1)	/* Returning to user mode? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	andi.	r0,r3,MSR_PR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	beq	resume_kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) user_exc_return:		/* r10 contains MSR_KERNEL here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	/* Check current_thread_info()->flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	lwz	r9,TI_FLAGS(r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	andi.	r0,r9,_TIF_USER_WORK_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	bne	do_work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) restore_user:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	/* Check whether this process has its own DBCR0 value.  The internal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	   debug mode bit tells us that dbcr0 should be loaded. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	lwz	r0,THREAD+THREAD_DBCR0(r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	andis.	r10,r0,DBCR0_IDM@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	bnel-	load_dbcr0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) #ifdef CONFIG_PPC_BOOK3S_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	kuep_unlock	r10, r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	b	restore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) /* N.B. the only way to get here is from the beq following ret_from_except. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) resume_kernel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	lwz	r8,TI_FLAGS(r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	beq+	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	lwz	r3,GPR1(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	mr	r4,r1			/* src:  current exception frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	mr	r1,r3			/* Reroute the trampoline frame to r1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	/* Copy from the original to the trampoline. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	li	r6,0			/* start offset: 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	mtctr	r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 2:	lwzx	r0,r6,r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	stwx	r0,r6,r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	addi	r6,r6,4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	bdnz	2b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	/* Do real store operation to complete stwu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	lwz	r5,GPR1(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	stw	r8,0(r5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	/* Clear _TIF_EMULATE_STACK_STORE flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	lis	r11,_TIF_EMULATE_STACK_STORE@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	addi	r5,r2,TI_FLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 0:	lwarx	r8,0,r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	andc	r8,r8,r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	stwcx.	r8,0,r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	bne-	0b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) #ifdef CONFIG_PREEMPTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	/* check current_thread_info->preempt_count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	lwz	r0,TI_PREEMPT(r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	bne	restore_kuap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	andi.	r8,r8,_TIF_NEED_RESCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	beq+	restore_kuap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	lwz	r3,_MSR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	andi.	r0,r3,MSR_EE	/* interrupts off? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	beq	restore_kuap	/* don't schedule if so */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) #ifdef CONFIG_TRACE_IRQFLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	/* Lockdep thinks irqs are enabled, we need to call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	 * preempt_schedule_irq with IRQs off, so we inform lockdep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	 * now that we -did- turn them off already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	bl	trace_hardirqs_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	bl	preempt_schedule_irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) #ifdef CONFIG_TRACE_IRQFLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	/* And now, to properly rebalance the above, we tell lockdep they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	 * are being turned back on, which will happen when we return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	bl	trace_hardirqs_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) #endif /* CONFIG_PREEMPTION */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) restore_kuap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	kuap_restore r1, r2, r9, r10, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	/* interrupts are hard-disabled at this point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) restore:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) #ifdef CONFIG_44x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) BEGIN_MMU_FTR_SECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	b	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	lis	r4,icache_44x_need_flush@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	lwz	r5,icache_44x_need_flush@l(r4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	cmplwi	cr0,r5,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	beq+	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	li	r6,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	iccci	r0,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	stw	r6,icache_44x_need_flush@l(r4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) #endif  /* CONFIG_44x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	lwz	r9,_MSR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) #ifdef CONFIG_TRACE_IRQFLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	 * off in this assembly code while peeking at TI_FLAGS() and such. However
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	 * we need to inform it if the exception turned interrupts off, and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	 * are about to trun them back on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	andi.	r10,r9,MSR_EE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	beq	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	stwu	r1,-32(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	mflr	r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	stw	r0,4(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	bl	trace_hardirqs_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	addi	r1, r1, 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	lwz	r9,_MSR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) #endif /* CONFIG_TRACE_IRQFLAGS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	lwz	r0,GPR0(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	lwz	r2,GPR2(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	REST_4GPRS(3, r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	REST_2GPRS(7, r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	lwz	r10,_XER(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	lwz	r11,_CTR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	mtspr	SPRN_XER,r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	mtctr	r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) BEGIN_FTR_SECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	lwarx	r11,0,r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	stwcx.	r0,0,r1			/* to clear the reservation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	lwz	r10,_CCR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	lwz	r11,_LINK(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	mtcrf	0xFF,r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	mtlr	r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	li	r10, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	stw	r10, 8(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	 * Once we put values in SRR0 and SRR1, we are in a state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	 * where exceptions are not recoverable, since taking an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	 * exception will trash SRR0 and SRR1.  Therefore we clear the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	 * MSR:RI bit to indicate this.  If we do take an exception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	 * we can't return to the point of the exception but we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	 * can restart the exception exit path at the label
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	 * exc_exit_restart below.  -- paulus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	mtmsr	r10		/* clear the RI bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	.globl exc_exit_restart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) exc_exit_restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	lwz	r12,_NIP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	mtspr	SPRN_SRR0,r12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	mtspr	SPRN_SRR1,r9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	REST_4GPRS(9, r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	lwz	r1,GPR1(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	.globl exc_exit_restart_end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) exc_exit_restart_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	RFI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) _ASM_NOKPROBE_SYMBOL(exc_exit_restart)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) _ASM_NOKPROBE_SYMBOL(exc_exit_restart_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) #else /* !(CONFIG_4xx || CONFIG_BOOKE) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	 * This is a bit different on 4xx/Book-E because it doesn't have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	 * the RI bit in the MSR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	 * The TLB miss handler checks if we have interrupted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	 * the exception exit path and restarts it if so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	 * (well maybe one day it will... :).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	lwz	r11,_LINK(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	mtlr	r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	lwz	r10,_CCR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	mtcrf	0xff,r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	li	r10, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	stw	r10, 8(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	REST_2GPRS(9, r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	.globl exc_exit_restart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) exc_exit_restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	lwz	r11,_NIP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	lwz	r12,_MSR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	mtspr	SPRN_SRR0,r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	mtspr	SPRN_SRR1,r12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	REST_2GPRS(11, r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	lwz	r1,GPR1(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	.globl exc_exit_restart_end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) exc_exit_restart_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	rfi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	b	.			/* prevent prefetch past rfi */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) _ASM_NOKPROBE_SYMBOL(exc_exit_restart)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)  * Returning from a critical interrupt in user mode doesn't need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)  * to be any different from a normal exception.  For a critical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)  * interrupt in the kernel, we just return (without checking for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)  * preemption) since the interrupt may have happened at some crucial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)  * place (e.g. inside the TLB miss handler), and because we will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)  * running with r1 pointing into critical_stack, not the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)  * process's kernel stack (and therefore current_thread_info() will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)  * give the wrong answer).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)  * We have to restore various SPRs that may have been in use at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)  * time of the critical interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) #ifdef CONFIG_40x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) #define PPC_40x_TURN_OFF_MSR_DR						    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	 * assume the instructions here are mapped by a pinned TLB entry */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	li	r10,MSR_IR;						    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	mtmsr	r10;							    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	isync;								    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	tophys(r1, r1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) #define PPC_40x_TURN_OFF_MSR_DR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) #define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	REST_NVGPRS(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	lwz	r3,_MSR(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	andi.	r3,r3,MSR_PR;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	bne	user_exc_return;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	lwz	r0,GPR0(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	lwz	r2,GPR2(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	REST_4GPRS(3, r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	REST_2GPRS(7, r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	lwz	r10,_XER(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	lwz	r11,_CTR(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	mtspr	SPRN_XER,r10;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	mtctr	r11;							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	stwcx.	r0,0,r1;		/* to clear the reservation */	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	lwz	r11,_LINK(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	mtlr	r11;							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	lwz	r10,_CCR(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	mtcrf	0xff,r10;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	PPC_40x_TURN_OFF_MSR_DR;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	lwz	r9,_DEAR(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	lwz	r10,_ESR(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	mtspr	SPRN_DEAR,r9;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	mtspr	SPRN_ESR,r10;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	lwz	r11,_NIP(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	lwz	r12,_MSR(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	mtspr	exc_lvl_srr0,r11;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	mtspr	exc_lvl_srr1,r12;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	lwz	r9,GPR9(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	lwz	r12,GPR12(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	lwz	r10,GPR10(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	lwz	r11,GPR11(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	lwz	r1,GPR1(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	exc_lvl_rfi;							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	b	.;		/* prevent prefetch past exc_lvl_rfi */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) #define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	lwz	r9,_##exc_lvl_srr0(r1);					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	lwz	r10,_##exc_lvl_srr1(r1);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	mtspr	SPRN_##exc_lvl_srr0,r9;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	mtspr	SPRN_##exc_lvl_srr1,r10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) #if defined(CONFIG_PPC_BOOK3E_MMU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) #ifdef CONFIG_PHYS_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) #define	RESTORE_MAS7							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	lwz	r11,MAS7(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	mtspr	SPRN_MAS7,r11;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) #define	RESTORE_MAS7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) #endif /* CONFIG_PHYS_64BIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) #define RESTORE_MMU_REGS						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	lwz	r9,MAS0(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	lwz	r10,MAS1(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	lwz	r11,MAS2(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	mtspr	SPRN_MAS0,r9;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	lwz	r9,MAS3(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	mtspr	SPRN_MAS1,r10;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	lwz	r10,MAS6(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	mtspr	SPRN_MAS2,r11;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	mtspr	SPRN_MAS3,r9;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	mtspr	SPRN_MAS6,r10;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	RESTORE_MAS7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) #elif defined(CONFIG_44x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) #define RESTORE_MMU_REGS						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	lwz	r9,MMUCR(r1);						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	mtspr	SPRN_MMUCR,r9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) #define RESTORE_MMU_REGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) #ifdef CONFIG_40x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	.globl	ret_from_crit_exc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) ret_from_crit_exc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	mfspr	r9,SPRN_SPRG_THREAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	lis	r10,saved_ksp_limit@ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	lwz	r10,saved_ksp_limit@l(r10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	tovirt(r9,r9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	stw	r10,KSP_LIMIT(r9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	lis	r9,crit_srr0@ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	lwz	r9,crit_srr0@l(r9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	lis	r10,crit_srr1@ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	lwz	r10,crit_srr1@l(r10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	mtspr	SPRN_SRR0,r9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	mtspr	SPRN_SRR1,r10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) _ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) #endif /* CONFIG_40x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) #ifdef CONFIG_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	.globl	ret_from_crit_exc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) ret_from_crit_exc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	mfspr	r9,SPRN_SPRG_THREAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	lwz	r10,SAVED_KSP_LIMIT(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	stw	r10,KSP_LIMIT(r9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	RESTORE_xSRR(SRR0,SRR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	RESTORE_MMU_REGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) _ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	.globl	ret_from_debug_exc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) ret_from_debug_exc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	mfspr	r9,SPRN_SPRG_THREAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	lwz	r10,SAVED_KSP_LIMIT(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	stw	r10,KSP_LIMIT(r9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	RESTORE_xSRR(SRR0,SRR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	RESTORE_xSRR(CSRR0,CSRR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	RESTORE_MMU_REGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) _ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	.globl	ret_from_mcheck_exc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) ret_from_mcheck_exc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	mfspr	r9,SPRN_SPRG_THREAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	lwz	r10,SAVED_KSP_LIMIT(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	stw	r10,KSP_LIMIT(r9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	RESTORE_xSRR(SRR0,SRR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	RESTORE_xSRR(CSRR0,CSRR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	RESTORE_xSRR(DSRR0,DSRR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	RESTORE_MMU_REGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) _ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) #endif /* CONFIG_BOOKE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)  * Load the DBCR0 value for a task that is being ptraced,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)  * having first saved away the global DBCR0.  Note that r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)  * has the dbcr0 value to set upon entry to this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) load_dbcr0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	mfmsr	r10		/* first disable debug exceptions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	rlwinm	r10,r10,0,~MSR_DE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	mtmsr	r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	isync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	mfspr	r10,SPRN_DBCR0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	lis	r11,global_dbcr0@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	addi	r11,r11,global_dbcr0@l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	lwz	r9,TASK_CPU(r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	slwi	r9,r9,3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	add	r11,r11,r9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	stw	r10,0(r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	mtspr	SPRN_DBCR0,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	lwz	r10,4(r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	addi	r10,r10,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	stw	r10,4(r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	li	r11,-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	blr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	.section .bss
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	.align	4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	.global global_dbcr0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) global_dbcr0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	.space	8*NR_CPUS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	.previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) do_work:			/* r10 contains MSR_KERNEL here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	andi.	r0,r9,_TIF_NEED_RESCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	beq	do_user_signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) do_resched:			/* r10 contains MSR_KERNEL here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) #ifdef CONFIG_TRACE_IRQFLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	bl	trace_hardirqs_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	mfmsr	r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	ori	r10,r10,MSR_EE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	mtmsr	r10		/* hard-enable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	bl	schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) recheck:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	/* Note: And we don't tell it we are disabling them again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	 * neither. Those disable/enable cycles used to peek at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	 * TI_FLAGS aren't advertised.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	mtmsr	r10		/* disable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	lwz	r9,TI_FLAGS(r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	andi.	r0,r9,_TIF_NEED_RESCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	bne-	do_resched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	andi.	r0,r9,_TIF_USER_WORK_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	beq	restore_user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) do_user_signal:			/* r10 contains MSR_KERNEL here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	ori	r10,r10,MSR_EE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	mtmsr	r10		/* hard-enable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	/* save r13-r31 in the exception frame, if not already done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	lwz	r3,_TRAP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	andi.	r0,r3,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	beq	2f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	SAVE_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	rlwinm	r3,r3,0,0,30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	stw	r3,_TRAP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 2:	addi	r3,r1,STACK_FRAME_OVERHEAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	mr	r4,r9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	bl	do_notify_resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	REST_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	b	recheck
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)  * We come here when we are at the end of handling an exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)  * that occurred at a place where taking an exception will lose
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)  * state information, such as the contents of SRR0 and SRR1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) nonrecoverable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	lis	r10,exc_exit_restart_end@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	addi	r10,r10,exc_exit_restart_end@l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	cmplw	r12,r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	bge	3f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	lis	r11,exc_exit_restart@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	addi	r11,r11,exc_exit_restart@l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	cmplw	r12,r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	blt	3f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	lis	r10,ee_restarts@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	lwz	r12,ee_restarts@l(r10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	addi	r12,r12,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	stw	r12,ee_restarts@l(r10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	mr	r12,r11		/* restart at exc_exit_restart */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	blr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 3:	/* OK, we can't recover, kill this process */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	lwz	r3,_TRAP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	andi.	r0,r3,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	beq	5f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	SAVE_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	rlwinm	r3,r3,0,0,30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	stw	r3,_TRAP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 5:	mfspr	r2,SPRN_SPRG_THREAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	addi	r2,r2,-THREAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	tovirt(r2,r2)			/* set back r2 to current */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 4:	addi	r3,r1,STACK_FRAME_OVERHEAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	bl	unrecoverable_exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	/* shouldn't return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	b	4b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) _ASM_NOKPROBE_SYMBOL(nonrecoverable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	.section .bss
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	.align	2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) ee_restarts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	.space	4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	.previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)  * PROM code for specific machines follows.  Put it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)  * here so it's easy to add arch-specific sections later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)  * -- Cort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) #ifdef CONFIG_PPC_RTAS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)  * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)  * called with the MMU off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) _GLOBAL(enter_rtas)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	stwu	r1,-INT_FRAME_SIZE(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	mflr	r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	stw	r0,INT_FRAME_SIZE+4(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	LOAD_REG_ADDR(r4, rtas)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	lis	r6,1f@ha	/* physical return address for rtas */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	addi	r6,r6,1f@l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	tophys(r6,r6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	tophys_novmstack r7, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	lwz	r8,RTASENTRY(r4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	lwz	r4,RTASBASE(r4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	mfmsr	r9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	stw	r9,8(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	mtmsr	r0	/* disable interrupts so SRR0/1 don't get trashed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	mtlr	r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	stw	r7, THREAD + RTAS_SP(r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	mtspr	SPRN_SRR0,r8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	mtspr	SPRN_SRR1,r9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	RFI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 1:	tophys_novmstack r9, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) #ifdef CONFIG_VMAP_STACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	li	r0, MSR_KERNEL & ~MSR_IR	/* can take DTLB miss */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	mtmsr	r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	isync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	lwz	r9,8(r9)	/* original msr value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	addi	r1,r1,INT_FRAME_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	li	r0,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	tophys_novmstack r7, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	stw	r0, THREAD + RTAS_SP(r7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	mtspr	SPRN_SRR0,r8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	mtspr	SPRN_SRR1,r9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	RFI			/* return to caller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) _ASM_NOKPROBE_SYMBOL(enter_rtas)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) #endif /* CONFIG_PPC_RTAS */