Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2)  * Low-level exception handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * This file is subject to the terms and conditions of the GNU General Public
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * License.  See the file "COPYING" in the main directory of this archive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * Copyright (C) 2004 - 2008 by Tensilica Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * Copyright (C) 2015 Cadence Design Systems Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  * Chris Zankel <chris@zankel.net>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/linkage.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/pgtable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <asm/asm-offsets.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <asm/asmmacro.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <asm/coprocessor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <asm/thread_info.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <asm/asm-uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <asm/unistd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <asm/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <asm/current.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <asm/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <asm/tlbflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <variant/tie-asm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) /* Unimplemented features. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #undef KERNEL_STACK_OVERFLOW_CHECK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) /* Not well tested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37)  * - fast_coprocessor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41)  * Macro to find first bit set in WINDOWBASE from the left + 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43)  * 100....0 -> 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44)  * 010....0 -> 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45)  * 000....1 -> WSBITS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 
	.macro ffs_ws bit mask

	/* \bit := position (1..WSBITS, counted from the left) of the
	 * most significant set bit of \mask (see table above).
	 * \mask is clobbered on configurations without the NSA option.
	 */
#if XCHAL_HAVE_NSA
	nsau    \bit, \mask			# 32-WSBITS ... 31 (32 iff 0)
	addi    \bit, \bit, WSBITS - 32 + 1   	# topmost bit set -> return 1
#else
	/* No NSA instruction: binary search, halving the examined
	 * width each step while shifting \mask right.
	 */
	movi    \bit, WSBITS
#if WSBITS > 16
	_bltui  \mask, 0x10000, 99f		# all set bits below bit 16?
	addi    \bit, \bit, -16
	extui   \mask, \mask, 16, 16		# keep only the high half
#endif
#if WSBITS > 8
99:	_bltui  \mask, 0x100, 99f		# all set bits below bit 8?
	addi    \bit, \bit, -8
	srli    \mask, \mask, 8
#endif
99:	_bltui  \mask, 0x10, 99f		# all set bits below bit 4?
	addi    \bit, \bit, -4
	srli    \mask, \mask, 4
99:	_bltui  \mask, 0x4, 99f			# all set bits below bit 2?
	addi    \bit, \bit, -2
	srli    \mask, \mask, 2
99:	_bltui  \mask, 0x2, 99f			# is bit 1 set?
	addi    \bit, \bit, -1
99:

#endif
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 
	.macro	irq_save flags tmp
	/* Mask interrupts by raising PS.INTLEVEL to (at least) LOCKLEVEL
	 * and return the previous PS value in \flags.  \tmp is scratch.
	 */
#if XTENSA_FAKE_NMI
#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
	/* rsil would also LOWER an INTLEVEL already above LOCKLEVEL
	 * (possible on these configurations), so only use it when the
	 * current level is below LOCKLEVEL.
	 */
	rsr	\flags, ps
	extui	\tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	bgei	\tmp, LOCKLEVEL, 99f	# already masked high enough
	rsil	\tmp, LOCKLEVEL
99:
#else
	/* OR LOCKLEVEL into PS (INTLEVEL occupies the PS low bits —
	 * NOTE(review): relies on PS_INTLEVEL_SHIFT == 0); xsr swaps
	 * the new PS in and the old PS out into \flags atomically.
	 */
	movi	\tmp, LOCKLEVEL
	rsr	\flags, ps
	or	\flags, \flags, \tmp
	xsr	\flags, ps
	rsync
#endif
#else
	rsil	\flags, LOCKLEVEL	# \flags = old PS, INTLEVEL = LOCKLEVEL
#endif
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) /* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102)  * First-level exception handler for user exceptions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103)  * Save some special registers, extra states and all registers in the AR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104)  * register file that were in use in the user task, and jump to the common
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105)  * exception code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106)  * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107)  * save them for kernel exceptions).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109)  * Entry condition for user_exception:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111)  *   a0:	trashed, original value saved on stack (PT_AREG0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112)  *   a1:	a1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113)  *   a2:	new stack pointer, original value in depc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114)  *   a3:	a3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115)  *   depc:	a2, original value saved on stack (PT_DEPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116)  *   excsave1:	dispatch table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118)  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119)  *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121)  * Entry condition for _user_exception:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123)  *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124)  *   excsave has been restored, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125)  *   stack pointer (a1) has been set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127)  * Note: _user_exception might be at an odd address. Don't use call0..call12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 	.literal_position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 
ENTRY(user_exception)

	/* Save a1, a2, a3, and set SP.
	 * Per the entry conditions above: a2 already points at the
	 * pt_regs save area and the original a2 is held in DEPC.
	 */

	rsr	a0, depc		# a0 = original a2
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2			# from here on a1 = pt_regs frame

	.globl _user_exception
_user_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	wsr	a2, depc		# terminate user stack trace with 0
	rsr	a3, sar
	xsr	a2, icountlevel		# a2 = old ICOUNTLEVEL, new level = 0
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

#if XCHAL_HAVE_THREADPTR
	rur	a2, threadptr		# save user thread pointer register
	s32i	a2, a1, PT_THREADPTR
#endif

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

	rsr	a2, windowbase
	rsr	a3, windowstart
	ssr	a2			# SAR = WINDOWBASE (shift amount for src)
	s32i	a2, a1, PT_WINDOWBASE
	s32i	a3, a1, PT_WINDOWSTART
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2		# funnel shift: rotate ws right by SAR
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for restoring registers

	/* Save only live registers.
	 * A set bit above bit 0 of the rotated windowstart means more
	 * frames are live; those are spilled by the loop at 1: below.
	 */

	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15
	_bnei	a2, 1, 1f		# only one valid frame?

	/* Only one valid frame, skip saving regs. */

	j	2f

	/* Save the remaining registers.
	 * We have to save all registers up to the first '1' from
	 * the right, except the current frame (bit 0).
	 * Assume a2 is:  001001000110001
	 * All register frames starting from the top field to the marked '1'
	 * must be saved.
	 */

1:	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
	and	a3, a3, a2		# max. only one bit is set

	/* Find number of frames to save */

	ffs_ws	a0, a3			# number of frames to the '1' from left

	/* Store information into WMASK:
	 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
	 * bits 4...: number of valid 4-register frames
	 */

	slli	a3, a0, 4		# number of frames to save in bits 8..4
	extui	a2, a2, 0, 4		# mask for the first 16 registers
	or	a2, a3, a2
	s32i	a2, a1, PT_WMASK	# needed when we restore the reg-file

	/* Save 4 registers at a time.
	 * After rotw -1 this window's a4/a5 are the previous window's
	 * a0/a1, i.e. the loop counter and the store pointer.
	 */

1:	rotw	-1
	s32i	a0, a5, PT_AREG_END - 16
	s32i	a1, a5, PT_AREG_END - 12
	s32i	a2, a5, PT_AREG_END - 8
	s32i	a3, a5, PT_AREG_END - 4
	addi	a0, a4, -1		# counter = previous counter - 1
	addi	a1, a5, -16		# pointer -= one 4-register frame
	_bnez	a0, 1b

	/* WINDOWBASE still in SAR! */

	rsr	a2, sar			# original WINDOWBASE
	movi	a3, 1
	ssl	a2
	sll	a3, a3			# a3 = 1 << original WINDOWBASE
	wsr	a3, windowstart		# set corresponding WINDOWSTART bit
	wsr	a2, windowbase		# and restore WINDOWBASE
	rsync

	/* We are back to the original stack pointer (a1) */

2:	/* Now, jump to the common exception handler. */

	j	common_exception

ENDPROC(user_exception)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250)  * First-level exit handler for kernel exceptions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251)  * Save special registers and the live window frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252)  * Note: Even though we change the stack pointer, we don't have to do a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253)  *	 MOVSP here, as we do that when we return from the exception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254)  *	 (See comment in the kernel exception exit code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256)  * Entry condition for kernel_exception:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258)  *   a0:	trashed, original value saved on stack (PT_AREG0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259)  *   a1:	a1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260)  *   a2:	new stack pointer, original in DEPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261)  *   a3:	a3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262)  *   depc:	a2, original value saved on stack (PT_DEPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263)  *   excsave_1:	dispatch table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265)  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266)  *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268)  * Entry condition for _kernel_exception:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270)  *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271)  *   excsave has been restored, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272)  *   stack pointer (a1) has been set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274)  * Note: _kernel_exception might be at an odd address. Don't use call0..call12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 
ENTRY(kernel_exception)

	/* Save a1, a2, a3, and set SP.
	 * Per the entry conditions above: a2 already points at the
	 * pt_regs save area and the original a2 is held in DEPC.
	 */

	rsr	a0, depc		# get a2
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2			# from here on a1 = pt_regs frame

	.globl _kernel_exception
_kernel_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	rsr	a3, sar
	xsr	a2, icountlevel		# a2 = old ICOUNTLEVEL, new level = 0
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

	rsr	a2, windowbase		# don't need to save these, we only
	rsr	a3, windowstart		# need shifted windowstart: windowmask
	ssr	a2			# SAR = WINDOWBASE (shift amount for src)
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2		# funnel shift: rotate ws right by SAR
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for kernel_exception_exit

	/* Save only the live window-frame.
	 * A set bit above bit 0 of the rotated windowstart means more
	 * frames are live in the register file; stop saving there.
	 */

	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

	_bnei	a2, 1, 1f		# fall through only if the current
					# frame is the only live one

	/* Copy spill slots of a0 and a1 to imitate movsp
	 * in order to keep exception stack continuous
	 */
	l32i	a3, a1, PT_SIZE
	l32i	a0, a1, PT_SIZE + 4
	s32e	a3, a1, -16
	s32e	a0, a1, -12
1:
	l32i	a0, a1, PT_AREG0	# restore saved a0
	wsr	a0, depc

#ifdef KERNEL_STACK_OVERFLOW_CHECK

	/*  Stack overflow check, for debugging  */
	/* NOTE(review): unimplemented placeholder — KERNEL_STACK_OVERFLOW_CHECK
	 * is #undef'd at the top of this file, so this never assembles
	 * ('XX' and 'SIZE??' are not valid operands).
	 */
	extui	a2, a1, TASK_SIZE_BITS,XX
	movi	a3, SIZE??
	_bge	a2, a3, out_of_stack_panic

#endif
	/* Falls through to common_exception below. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350)  * This is the common exception handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351)  * We get here from the user exception handler or simply by falling through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352)  * from the kernel exception handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353)  * Save the remaining special registers, switch to kernel mode, and jump
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354)  * to the second-level exception handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) common_exception:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 	/* Save some registers, disable loops and clear the syscall flag. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 	rsr	a2, debugcause
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 	rsr	a3, epc1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 	s32i	a2, a1, PT_DEBUGCAUSE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 	s32i	a3, a1, PT_PC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	movi	a2, NO_SYSCALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	rsr	a3, excvaddr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 	s32i	a2, a1, PT_SYSCALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 	movi	a2, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 	s32i	a3, a1, PT_EXCVADDR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) #if XCHAL_HAVE_LOOPS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	xsr	a2, lcount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	s32i	a2, a1, PT_LCOUNT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) #if XCHAL_HAVE_EXCLUSIVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	/* Clear exclusive access monitor set by interrupted code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	clrex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	rsr	a2, exccause
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	movi	a3, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	rsr	a0, excsave1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 	s32i	a2, a1, PT_EXCCAUSE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	s32i	a3, a0, EXC_TABLE_FIXUP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 	/* All unrecoverable states are saved on stack, now, and a1 is valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	 * Now we can allow exceptions again. In case we've got an interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	 * PS.INTLEVEL is set to LOCKLEVEL disabling further interrupts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	 * otherwise it's left unchanged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 	rsr	a3, ps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	s32i	a3, a1, PT_PS		# save ps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) #if XTENSA_FAKE_NMI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 	/* Correct PS needs to be saved in the PT_PS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	 * - in case of exception or level-1 interrupt it's in the PS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	 *   and is already saved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	 * - in case of medium level interrupt it's in the excsave2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	movi	a0, EXCCAUSE_MAPPED_NMI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	beq	a2, a0, .Lmedium_level_irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	bnei	a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	beqz	a3, .Llevel1_irq	# level-1 IRQ sets ps.intlevel to 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) .Lmedium_level_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	rsr	a0, excsave2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	s32i	a0, a1, PT_PS		# save medium-level interrupt ps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	bgei	a3, LOCKLEVEL, .Lexception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) .Llevel1_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	movi	a3, LOCKLEVEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) .Lexception:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	movi	a0, PS_WOE_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	or	a3, a3, a0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	addi	a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	movi	a0, LOCKLEVEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 					# a3 = PS.INTLEVEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	moveqz	a3, a0, a2		# a3 = LOCKLEVEL iff interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 	movi	a2, PS_WOE_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	or	a3, a3, a2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	rsr	a2, exccause
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	/* restore return address (or 0 if return to userspace) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	rsr	a0, depc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	wsr	a3, ps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	rsync				# PS.WOE => rsync => overflow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 	/* Save lbeg, lend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) #if XCHAL_HAVE_LOOPS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	rsr	a4, lbeg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 	rsr	a3, lend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	s32i	a4, a1, PT_LBEG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	s32i	a3, a1, PT_LEND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	/* Save SCOMPARE1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) #if XCHAL_HAVE_S32C1I
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	rsr     a3, scompare1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	s32i    a3, a1, PT_SCOMPARE1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	/* Save optional registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	/* Go to second-level dispatcher. Set up parameters to pass to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	 * exception handler and call the exception handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	rsr	a4, excsave1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	mov	a6, a1			# pass stack frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	mov	a7, a2			# pass EXCCAUSE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	addx4	a4, a2, a4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	l32i	a4, a4, EXC_TABLE_DEFAULT		# load handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	/* Call the second-level handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	callx4	a4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 
	/* Jump here for exception exit */
	.global common_exception_return
common_exception_return:

#if XTENSA_FAKE_NMI
	/* Exceptions tagged EXCCAUSE_MAPPED_NMI take a shortened exit
	 * path (.LNMIexit) that skips the work-pending checks below.
	 */
	l32i	a2, a1, PT_EXCCAUSE
	movi	a3, EXCCAUSE_MAPPED_NMI
	beq	a2, a3, .LNMIexit
#endif
1:
	irq_save a2, a3
#ifdef CONFIG_TRACE_IRQFLAGS
	call4	trace_hardirqs_off
#endif

	/* Jump if we are returning from kernel exceptions. */

	l32i	a3, a1, PT_PS		# a3 = saved PS, kept live until restore
	GET_THREAD_INFO(a2, a1)
	l32i	a4, a2, TI_FLAGS
	_bbci.l	a3, PS_UM_BIT, 6f	# PS.UM clear: returning to kernel mode

	/* Specific to a user exception exit:
	 * We need to check some flags for signal handling and rescheduling,
	 * and have to restore WB and WS, extra states, and all registers
	 * in the register file that were in use in the user task.
	 * Note that we don't disable interrupts here.
	 */

	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
	_bbsi.l	a4, TIF_NOTIFY_RESUME, 2f
	_bbci.l	a4, TIF_SIGPENDING, 5f

2:	l32i	a4, a1, PT_DEPC
	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f	# skip on double exception

	/* Call do_notify_resume() (pending signals / notify-resume work)
	 * with interrupts enabled, then re-check flags from the top.
	 */

#ifdef CONFIG_TRACE_IRQFLAGS
	call4	trace_hardirqs_on
#endif
	rsil	a2, 0			# PS.INTLEVEL = 0: re-enable interrupts
	mov	a6, a1
	call4	do_notify_resume	# int do_notify_resume(struct pt_regs*)
	j	1b			# re-check TI_FLAGS with interrupts masked

3:	/* Reschedule */

#ifdef CONFIG_TRACE_IRQFLAGS
	call4	trace_hardirqs_on
#endif
	rsil	a2, 0			# PS.INTLEVEL = 0: re-enable interrupts
	call4	schedule	# void schedule (void)
	j	1b			# re-check TI_FLAGS after returning

#ifdef CONFIG_PREEMPTION
6:
	/* Kernel-mode return with preemption enabled. */
	_bbci.l	a4, TIF_NEED_RESCHED, 4f

	/* Check current_thread_info->preempt_count */

	l32i	a4, a2, TI_PRE_COUNT
	bnez	a4, 4f			# preemption disabled: plain exit
	call4	preempt_schedule_irq
	j	4f
#endif

#if XTENSA_FAKE_NMI
.LNMIexit:
	/* NMI exit: no work-pending checks; go straight to the register
	 * restore when returning to kernel mode, else fall through to 5.
	 */
	l32i	a3, a1, PT_PS
	_bbci.l	a3, PS_UM_BIT, 4f
#endif

5:
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* Re-arm data breakpoints if they were disabled (TIF_DB_DISABLED). */
	_bbci.l	a4, TIF_DB_DISABLED, 7f
	call4	restore_dbreak
7:
#endif
#ifdef CONFIG_DEBUG_TLB_SANITY
	l32i	a4, a1, PT_DEPC
	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f	# skip on double exception
	call4	check_tlb_sanity
#endif
6:
4:
#ifdef CONFIG_TRACE_IRQFLAGS
	extui	a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	bgei	a4, LOCKLEVEL, 1f	# interrupts stay masked after return
	call4	trace_hardirqs_on
1:
#endif
	/* Restore optional registers. */

	load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT

	/* Restore SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
	l32i    a2, a1, PT_SCOMPARE1
	wsr     a2, scompare1
#endif
	wsr	a3, ps		/* disable interrupts */

	_bbci.l	a3, PS_UM_BIT, kernel_exception_exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 
user_exception_exit:

	/* Restore the state of the task and return from the exception. */

	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */

	l32i	a2, a1, PT_WINDOWBASE
	l32i	a3, a1, PT_WINDOWSTART
	wsr	a1, depc		# use DEPC as temp storage
	wsr	a3, windowstart		# restore WINDOWSTART
	ssr	a2			# preserve user's WB in the SAR
	wsr	a2, windowbase		# switch to user's saved WB
	rsync
	rsr	a1, depc		# restore stack pointer
	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
	rotw	-1			# we restore a4..a7
	_bltui	a6, 16, 1f		# only have to restore current window?

	/* The working registers are a0 and a3.  We are restoring to
	 * a4..a7.  Be careful not to destroy what we have just restored.
	 * Note: wmask has the format YYYYM:
	 *       Y: number of registers saved in groups of 4
	 *       M: 4 bit mask of first 16 registers
	 */

	mov	a2, a6			# a2 = remaining wmask (was a2 before rotw)
	mov	a3, a5			# a3 = save-area pointer (was a1 before rotw)

2:	rotw	-1			# a0..a3 become a4..a7
	addi	a3, a7, -4*4		# next iteration
	addi	a2, a6, -16		# decrementing Y in WMASK
	l32i	a4, a3, PT_AREG_END + 0
	l32i	a5, a3, PT_AREG_END + 4
	l32i	a6, a3, PT_AREG_END + 8
	l32i	a7, a3, PT_AREG_END + 12
	_bgeui	a2, 16, 2b		# more 4-register groups left?

	/* Clear unrestored registers (don't leak anything to user-land) */

1:	rsr	a0, windowbase
	rsr	a3, sar			# SAR holds user's WINDOWBASE (from ssr above)
	sub	a3, a0, a3		# distance current WB -> user WB
	beqz	a3, 2f			# nothing left to clear
	extui	a3, a3, 0, WBBITS

1:	rotw	-1
	addi	a3, a7, -1		# frames still to clear (a3 before rotw)
	movi	a4, 0
	movi	a5, 0
	movi	a6, 0
	movi	a7, 0
	bgei	a3, 1, 1b

	/* We are back where we were when we started.
	 * Note: a2 still contains WMASK (if we've returned to the original
	 *	 frame where we had loaded a2), or at least the lower 4 bits
	 *	 (if we have restored WSBITS-1 frames).
	 */

2:
#if XCHAL_HAVE_THREADPTR
	l32i	a3, a1, PT_THREADPTR
	wur	a3, threadptr
#endif

	j	common_exception_exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 
	/* This is the kernel exception exit.
	 * We avoided doing a MOVSP when we entered the exception, but we
	 * have to do it here.
	 */

kernel_exception_exit:

	/* Check if we have to do a movsp.
	 *
	 * We only have to do a movsp if the previous window-frame has
	 * been spilled to the *temporary* exception stack instead of the
	 * task's stack. This is the case if the corresponding bit in
	 * WINDOWSTART for the previous window-frame was set before
	 * (not spilled) but is zero now (spilled).
	 * If this bit is zero, all other bits except the one for the
	 * current window frame are also zero. So, we can use a simple test:
	 * 'and' WINDOWSTART and WINDOWSTART-1:
	 *
	 *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
	 *
	 * The result is zero only if one bit was set.
	 *
	 * (Note: We might have gone through several task switches before
	 *        we come back to the current task, so WINDOWBASE might be
	 *        different from the time the exception occurred.)
	 */

	/* Test WINDOWSTART before and after the exception.
	 * We actually have WMASK, so we only have to test if it is 1 or not.
	 */

	l32i	a2, a1, PT_WMASK
	_beqi	a2, 1, common_exception_exit	# Spilled before exception,jump

	/* Test WINDOWSTART now. If spilled, do the movsp */

	rsr     a3, windowstart
	addi	a0, a3, -1
	and     a3, a3, a0			# zero iff exactly one bit set
	_bnez	a3, common_exception_exit

	/* Do a movsp (we returned from a call4, so we have at least a0..a7) */

	addi    a0, a1, -16			# a0 = caller's 16-byte spill area
	l32i    a3, a0, 0
	l32i    a4, a0, 4
	s32i    a3, a1, PT_SIZE+0		# copy spill area above pt_regs
	s32i    a4, a1, PT_SIZE+4
	l32i    a3, a0, 8
	l32i    a4, a0, 12
	s32i    a3, a1, PT_SIZE+8
	s32i    a4, a1, PT_SIZE+12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
	/* Common exception exit.
	 * We restore the special register and the current window frame, and
	 * return from the exception.
	 *
	 * Note: We expect a2 to hold PT_WMASK
	 */

common_exception_exit:

	/* Restore address registers. WMASK bits 1..3 tell which groups of
	 * four registers were NOT saved and can be skipped.
	 */

	_bbsi.l	a2, 1, 1f
	l32i	a4,  a1, PT_AREG4
	l32i	a5,  a1, PT_AREG5
	l32i	a6,  a1, PT_AREG6
	l32i	a7,  a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	l32i	a8,  a1, PT_AREG8
	l32i	a9,  a1, PT_AREG9
	l32i	a10, a1, PT_AREG10
	l32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	l32i	a12, a1, PT_AREG12
	l32i	a13, a1, PT_AREG13
	l32i	a14, a1, PT_AREG14
	l32i	a15, a1, PT_AREG15

	/* Restore PC, SAR */

1:	l32i	a2, a1, PT_PC
	l32i	a3, a1, PT_SAR
	wsr	a2, epc1
	wsr	a3, sar

	/* Restore LBEG, LEND, LCOUNT */
#if XCHAL_HAVE_LOOPS
	l32i	a2, a1, PT_LBEG
	l32i	a3, a1, PT_LEND
	wsr	a2, lbeg
	l32i	a2, a1, PT_LCOUNT
	wsr	a3, lend
	wsr	a2, lcount
#endif

	/* We control single stepping through the ICOUNTLEVEL register. */

	l32i	a2, a1, PT_ICOUNTLEVEL
	movi	a3, -2
	wsr	a2, icountlevel
	wsr	a3, icount

	/* Check if it was double exception (PT_DEPC marks it). */

	l32i	a0, a1, PT_DEPC
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore a0...a3 and return */

	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfe

1: 	wsr	a0, depc		# restore DEPC, return via rfde
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfde

ENDPROC(kernel_exception)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
/*
 * Debug exception handler.
 *
 * Currently, we don't support KGDB, so only user applications can be debugged.
 *
 * When we get here, a0 is trashed and saved to excsave[debuglevel].
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	.literal_position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
ENTRY(debug_exception)

	/* PS as saved at the debug level; PS.EXCM tells whether the
	 * interrupted code was already in exception mode.
	 */
	rsr	a0, SREG_EPS + XCHAL_DEBUGLEVEL
	bbsi.l	a0, PS_EXCM_BIT, 1f	# exception mode

	/* Set EPC1 and EXCCAUSE */

	wsr	a2, depc		# save a2 temporarily
	rsr	a2, SREG_EPC + XCHAL_DEBUGLEVEL
	wsr	a2, epc1

	movi	a2, EXCCAUSE_MAPPED_DEBUG
	wsr	a2, exccause

	/* Restore PS to the value before the debug exc but with PS.EXCM set.*/

	movi	a2, 1 << PS_EXCM_BIT
	or	a2, a0, a2
	wsr	a2, ps

	/* Switch to kernel/user stack, restore jump vector, and save a0 */

	bbsi.l	a2, PS_UM_BIT, 2f	# jump if user mode

	addi	a2, a1, -16-PT_SIZE	# assume kernel stack
3:
	/* a2 = new stack frame; build a regular pt_regs frame on it. */
	l32i	a0, a3, DT_DEBUG_SAVE
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	xsr	a0, depc		# a0 = saved a2; depc = 0
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2

	/* Debug exception is handled as an exception, so interrupts will
	 * likely be enabled in the common exception handler. Disable
	 * preemption if we have HW breakpoints to preserve DEBUGCAUSE.DBNUM
	 * meaning.
	 */
#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_HAVE_HW_BREAKPOINT)
	GET_THREAD_INFO(a2, a1)
	l32i	a3, a2, TI_PRE_COUNT
	addi	a3, a3, 1
	s32i	a3, a2, TI_PRE_COUNT
#endif

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, _user_exception
	j	_kernel_exception

2:	rsr	a2, excsave1
	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
	j	3b

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* Debug exception while in exception mode. This may happen when
	 * window overflow/underflow handler or fast exception handler hits
	 * data breakpoint, in which case save and disable all data
	 * breakpoints, single-step faulting instruction and restore data
	 * breakpoints.
	 */
1:
	bbci.l	a0, PS_UM_BIT, 1b	# kernel mode: spin here (unrecoverable)

	rsr	a0, debugcause
	bbsi.l	a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak

	/* Not a DBREAK hit: this is the single-step re-entry; restore the
	 * previously saved DBREAKC, ICOUNTLEVEL and ICOUNT state.
	 */
	.set	_index, 0
	.rept	XCHAL_NUM_DBREAK
	l32i	a0, a3, DT_DBREAKC_SAVE + _index * 4
	wsr	a0, SREG_DBREAKC + _index
	.set	_index, _index + 1
	.endr

	l32i	a0, a3, DT_ICOUNT_LEVEL_SAVE
	wsr	a0, icountlevel

	l32i	a0, a3, DT_ICOUNT_SAVE
	xsr	a0, icount

	l32i	a0, a3, DT_DEBUG_SAVE
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	rfi	XCHAL_DEBUGLEVEL

.Ldebug_save_dbreak:
	/* DBREAK hit: clear and save all DBREAKC registers, then arm
	 * ICOUNT to single-step the faulting instruction.
	 */
	.set	_index, 0
	.rept	XCHAL_NUM_DBREAK
	movi	a0, 0
	xsr	a0, SREG_DBREAKC + _index
	s32i	a0, a3, DT_DBREAKC_SAVE + _index * 4
	.set	_index, _index + 1
	.endr

	movi	a0, XCHAL_EXCM_LEVEL + 1
	xsr	a0, icountlevel
	s32i	a0, a3, DT_ICOUNT_LEVEL_SAVE

	movi	a0, 0xfffffffe		# icount = -2
	xsr	a0, icount
	s32i	a0, a3, DT_ICOUNT_SAVE

	l32i	a0, a3, DT_DEBUG_SAVE
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	rfi	XCHAL_DEBUGLEVEL
#else
	/* Debug exception while in exception mode. Should not happen. */
1:	j	1b	// FIXME!!
#endif

ENDPROC(debug_exception)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895)  * We get here in case of an unrecoverable exception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896)  * The only thing we can do is to be nice and print a panic message.
 * We only produce a single stack frame for panic, so the backtrace is
 * limited to the caller address passed in a0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900)  * Entry conditions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902)  *   - a0 contains the caller address; original value saved in excsave1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903)  *   - the original a0 contains a valid return address (backtrace) or 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904)  *   - a2 contains a valid stackpointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906)  * Notes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908)  *   - If the stack pointer could be invalid, the caller has to setup a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909)  *     dummy stack pointer (e.g. the stack of the init_task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911)  *   - If the return address could be invalid, the caller has to set it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912)  *     to 0, so the backtrace would stop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914)  */
	.align 4
unrecoverable_text:
	.ascii "Unrecoverable error in exception handler\0"

	.literal_position

ENTRY(unrecoverable_exception)

	/* Reset the register window: make the current frame the only live
	 * one (WINDOWSTART = 1) and rotate to window base 0.
	 */
	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync

	/* Run with window overflow enabled, interrupts masked at LOCKLEVEL. */
	movi	a1, PS_WOE_MASK | LOCKLEVEL
	wsr	a1, ps
	rsync

	/* Switch to init_task's stack; a0 = 0 terminates the backtrace
	 * (see the note in the header comment above).
	 */
	movi	a1, init_task
	movi	a0, 0
	addi	a1, a1, PT_REGS_OFFSET

	/* panic(unrecoverable_text) -- should not return. */
	movi	a6, unrecoverable_text
	call4	panic

1:	j	1b			# spin forever if panic ever returns

ENDPROC(unrecoverable_exception)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) /* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	__XTENSA_HANDLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	.literal_position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951)  * Fast-handler for alloca exceptions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953)  *  The ALLOCA handler is entered when user code executes the MOVSP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954)  *  instruction and the caller's frame is not in the register file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956)  * This algorithm was taken from the Ross Morley's RTOS Porting Layer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958)  *    /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960)  * It leverages the existing window spill/fill routines and their support for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961)  * double exceptions. The 'movsp' instruction will only cause an exception if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962)  * the next window needs to be loaded. In fact this ALLOCA exception may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963)  * replaced at some point by changing the hardware to do a underflow exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964)  * of the proper size instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966)  * This algorithm simply backs out the register changes started by the user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967)  * exception handler, makes it appear that we have started a window underflow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968)  * by rotating the window back and then setting the old window base (OWB) in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969)  * the 'ps' register with the rolled back window base. The 'movsp' instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970)  * will be re-executed and this time since the next window frames is in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971)  * active AR registers it won't cause an exception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972)  *
 * If the WindowUnderflow code gets a TLB miss, the page will get mapped
 * and the partial WindowUnderflow will be handled in the double exception
 * handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977)  * Entry condition:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979)  *   a0:	trashed, original value saved on stack (PT_AREG0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980)  *   a1:	a1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981)  *   a2:	new stack pointer, original in DEPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982)  *   a3:	a3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983)  *   depc:	a2, original value saved on stack (PT_DEPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984)  *   excsave_1:	dispatch table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986)  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987)  *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
ENTRY(fast_alloca)
	/* Back out the state set up by the user exception handler: rotate
	 * the window back one frame so this looks like a window underflow,
	 * and patch PS.OWB to the rolled-back window base so 'movsp' can
	 * be re-executed.  After 'rotw -1', the old aN is visible as a(N+4):
	 * a4 = old a0 (windowbase read below), a6 = old a2 (kernel sp).
	 */
	rsr	a0, windowbase
	rotw	-1
	rsr	a2, ps
	extui	a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
	xor	a3, a3, a4		# a3 = PS.OWB ^ saved windowbase
	l32i	a4, a6, PT_AREG0	# restore the user's original a0
	l32i	a1, a6, PT_DEPC
	rsr	a6, depc		# restore a2 (original value was in DEPC)
	wsr	a1, depc		# restore DEPC (double-exception marker)
	slli	a3, a3, PS_OWB_SHIFT
	xor	a2, a2, a3		# replace PS.OWB with saved windowbase
	wsr	a2, ps
	rsync

	/* Dispatch to the underflow handler matching the call size encoded
	 * in the top two bits of the return address (user a0).
	 */
	_bbci.l	a4, 31, 4f		# bit 31 clear: call4 frame
	rotw	-1
	_bbci.l	a8, 30, 8f		# a8 = a4 before this rotw
	rotw	-1
	j	_WindowUnderflow12
8:	j	_WindowUnderflow8
4:	j	_WindowUnderflow4
ENDPROC(fast_alloca)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) #ifdef CONFIG_USER_ABI_CALL0_PROBE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)  * fast illegal instruction handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)  * This is used to fix up user PS.WOE on the exception caused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)  * by the first opcode related to register window. If PS.WOE is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)  * already set it goes directly to the common user exception handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)  * Entry condition:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)  *   a0:	trashed, original value saved on stack (PT_AREG0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)  *   a1:	a1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)  *   a2:	new stack pointer, original in DEPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)  *   a3:	a3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)  *   depc:	a2, original value saved on stack (PT_DEPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)  *   excsave_1:	dispatch table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
ENTRY(fast_illegal_instruction_user)

	/* If PS.WOE is already set this is a genuine illegal instruction:
	 * take the common user exception path.  Otherwise set PS.WOE and
	 * retry the faulting opcode.
	 */
	rsr	a0, ps
	bbsi.l	a0, PS_WOE_BIT, 1f
	s32i	a3, a2, PT_AREG3	# need a scratch register
	movi	a3, PS_WOE_MASK
	or	a0, a0, a3		# set PS.WOE
	wsr	a0, ps
	l32i	a3, a2, PT_AREG3	# restore a3
	l32i	a0, a2, PT_AREG0	# restore a0
	rsr	a2, depc		# restore a2 (original value is in DEPC)
	rfe				# re-execute the faulting instruction
1:
	call0	user_exception

ENDPROC(fast_illegal_instruction_user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)  * fast system calls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)  * WARNING:  The kernel doesn't save the entire user context before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)  * handling a fast system call.  These functions are small and short,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)  * usually offering some functionality not available to user tasks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)  * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)  * Entry condition:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)  *   a0:	trashed, original value saved on stack (PT_AREG0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)  *   a1:	a1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)  *   a2:	new stack pointer, original in DEPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)  *   a3:	a3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)  *   depc:	a2, original value saved on stack (PT_DEPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)  *   excsave_1:	dispatch table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
ENTRY(fast_syscall_user)

	/* Skip syscall instruction (3 bytes) so we return past it. */

	rsr	a0, epc1
	addi	a0, a0, 3
	wsr	a0, epc1

	/* A double exception on fast-syscall entry is unrecoverable. */
	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, depc			# get syscall-nr (original a2)
	_beqz	a0, fast_syscall_spill_registers
	_beqi	a0, __NR_xtensa, fast_syscall_xtensa

	/* Not a fast syscall: take the full user exception path. */
	call0	user_exception

ENDPROC(fast_syscall_user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
ENTRY(fast_syscall_unrecoverable)

	/* Restore all states. */

	l32i    a0, a2, PT_AREG0        # restore a0
	xsr     a2, depc                # restore a2, depc

	wsr     a0, excsave1		# unrecoverable_exception expects the
					# original a0 in excsave1 (see above)
	call0	unrecoverable_exception

ENDPROC(fast_syscall_unrecoverable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)  * sysxtensa syscall handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)  * int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)  * int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)  * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)  * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)  *        a2            a6                   a3    a4      a5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)  * Entry condition:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)  *   a0:	a2 (syscall-nr), original value saved on stack (PT_AREG0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)  *   a1:	a1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)  *   a2:	new stack pointer, original in a0 and DEPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)  *   a3:	a3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)  *   a4..a15:	unchanged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)  *   depc:	a2, original value saved on stack (PT_DEPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)  *   excsave_1:	dispatch table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)  *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)  * Note: we don't have to save a2; a2 holds the return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	.literal_position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) #ifdef CONFIG_FAST_SYSCALL_XTENSA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
ENTRY(fast_syscall_xtensa)

	s32i	a7, a2, PT_AREG7	# we need an additional register
	movi	a7, 4			# sizeof(unsigned int)
	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp

	_bgeui	a6, SYS_XTENSA_COUNT, .Lill	# unknown sysxtensa operation
	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp

	/* Fall through for ATOMIC_CMP_SWP. */

.Lswp:	/* Atomic compare and swap */

EX(.Leac) l32i	a0, a3, 0		# read old value
	bne	a0, a4, 1f		# same as old value? jump
EX(.Leac) s32i	a5, a3, 0		# different, modify value
	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 1			# and return 1 (swap performed)
	rfe

1:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 0			# return 0 (swap was not performed)
	rfe

.Lnswp:	/* Atomic set, add, and exg_add. */

EX(.Leac) l32i	a7, a3, 0		# a7 = orig = *ptr
	addi	a6, a6, -SYS_XTENSA_ATOMIC_SET	# a6 == 0 iff ATOMIC_SET
	add	a0, a4, a7		# add/exg_add: new = orig + arg
	moveqz	a0, a4, a6		# set: new = arg
	addi	a6, a6, SYS_XTENSA_ATOMIC_SET	# undo the bias on a6
EX(.Leac) s32i	a0, a3, 0		# write new value

	mov	a0, a2			# a0 = sp (pt_regs)
	mov	a2, a7			# return the previous value of *ptr
	l32i	a7, a0, PT_AREG7	# restore a7
	l32i	a0, a0, PT_AREG0	# restore a0
	rfe

.Leac:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EFAULT		# bad user pointer
	rfe

.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EINVAL		# unknown sysxtensa operation
	rfe

ENDPROC(fast_syscall_xtensa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) #else /* CONFIG_FAST_SYSCALL_XTENSA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
ENTRY(fast_syscall_xtensa)

	/* CONFIG_FAST_SYSCALL_XTENSA is disabled: reject the call. */

	l32i    a0, a2, PT_AREG0        # restore a0
	movi	a2, -ENOSYS		# return -ENOSYS
	rfe

ENDPROC(fast_syscall_xtensa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) #endif /* CONFIG_FAST_SYSCALL_XTENSA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) /* fast_syscall_spill_registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)  * Entry condition:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)  *   a0:	trashed, original value saved on stack (PT_AREG0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)  *   a1:	a1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)  *   a2:	new stack pointer, original in DEPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)  *   a3:	a3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)  *   depc:	a2, original value saved on stack (PT_DEPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)  *   excsave_1:	dispatch table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)  * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) #ifdef CONFIG_FAST_SYSCALL_SPILL_REGISTERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) ENTRY(fast_syscall_spill_registers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	/* Register a FIXUP handler (pass current wb as a parameter) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	xsr	a3, excsave1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	movi	a0, fast_syscall_spill_registers_fixup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	s32i	a0, a3, EXC_TABLE_FIXUP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	rsr	a0, windowbase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	s32i	a0, a3, EXC_TABLE_PARAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	xsr	a3, excsave1		# restore a3 and excsave_1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	/* Save a3, a4 and SAR on stack. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	rsr	a0, sar
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	s32i	a3, a2, PT_AREG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	s32i	a0, a2, PT_SAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	/* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	s32i	a4, a2, PT_AREG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	s32i	a7, a2, PT_AREG7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	s32i	a8, a2, PT_AREG8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	s32i	a11, a2, PT_AREG11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	s32i	a12, a2, PT_AREG12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	s32i	a15, a2, PT_AREG15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	 * Rotate ws so that the current windowbase is at bit 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	 * Assume ws = xxxwww1yy (www1 current window frame).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	 * Rotate ws right so that a4 = yyxxxwww1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	rsr	a0, windowbase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	rsr	a3, windowstart		# a3 = xxxwww1yy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	ssr	a0			# holds WB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	slli	a0, a3, WSBITS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	or	a3, a3, a0		# a3 = xxxwww1yyxxxwww1yy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	srl	a3, a3			# a3 = 00xxxwww1yyxxxwww1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	/* We are done if there are no more than the current register frame. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	extui	a3, a3, 1, WSBITS-1	# a3 = 0yyxxxwww
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	movi	a0, (1 << (WSBITS-1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	_beqz	a3, .Lnospill		# only one active frame? jump
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	/* We want 1 at the top, so that we return to the current windowbase */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	or	a3, a3, a0		# 1yyxxxwww
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	wsr	a3, windowstart		# save shifted windowstart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	neg	a0, a3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	and	a3, a0, a3		# first bit set from right: 000010000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	ffs_ws	a0, a3			# a0: shifts to skip empty frames
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	movi	a3, WSBITS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	sub	a0, a3, a0		# WSBITS-a0:number of 0-bits from right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	ssr	a0			# save in SAR for later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	rsr	a3, windowbase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	add	a3, a3, a0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	wsr	a3, windowbase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	rsync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	rsr	a3, windowstart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	srl	a3, a3			# shift windowstart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	/* WB is now just one frame below the oldest frame in the register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	   window. WS is shifted so the oldest frame is in bit 0, thus, WB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	   and WS differ by one 4-register frame. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	/* Save frames. Depending what call was used (call4, call8, call12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	 * we have to save 4,8. or 12 registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) .Lloop: _bbsi.l	a3, 1, .Lc4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	_bbci.l	a3, 2, .Lc12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) .Lc8:	s32e	a4, a13, -16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	l32e	a4, a5, -12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	s32e	a8, a4, -32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	s32e	a5, a13, -12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	s32e	a6, a13, -8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	s32e	a7, a13, -4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	s32e	a9, a4, -28
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	s32e	a10, a4, -24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	s32e	a11, a4, -20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	srli	a11, a3, 2		# shift windowbase by 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	rotw	2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	_bnei	a3, 1, .Lloop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	j	.Lexit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) .Lc4:	s32e	a4, a9, -16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	s32e	a5, a9, -12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	s32e	a6, a9, -8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	s32e	a7, a9, -4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	srli	a7, a3, 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	rotw	1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	_bnei	a3, 1, .Lloop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	j	.Lexit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) .Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 2 shouldn't be zero!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	/* 12-register frame (call12) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	l32e	a0, a5, -12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	s32e	a8, a0, -48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	mov	a8, a0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	s32e	a9, a8, -44
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	s32e	a10, a8, -40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	s32e	a11, a8, -36
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	s32e	a12, a8, -32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	s32e	a13, a8, -28
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	s32e	a14, a8, -24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	s32e	a15, a8, -20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	srli	a15, a3, 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	/* The stack pointer for a4..a7 is out of reach, so we rotate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	 * window, grab the stackpointer, and rotate back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	 * Alternatively, we could also use the following approach, but that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	 * makes the fixup routine much more complicated:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	 * rotw	1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	 * s32e	a0, a13, -16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	 * ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	 * rotw 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	rotw	1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	mov	a4, a13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	rotw	-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	s32e	a4, a8, -16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	s32e	a5, a8, -12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	s32e	a6, a8, -8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	s32e	a7, a8, -4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	rotw	3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	_beqi	a3, 1, .Lexit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	j	.Lloop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) .Lexit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	/* Done. Do the final rotation and set WS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	rotw	1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	rsr	a3, windowbase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	ssl	a3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	movi	a3, 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	sll	a3, a3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	wsr	a3, windowstart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) .Lnospill:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	/* Advance PC, restore registers and SAR, and return from exception. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	l32i	a3, a2, PT_SAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	l32i	a0, a2, PT_AREG0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	wsr	a3, sar
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	l32i	a3, a2, PT_AREG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	/* Restore clobbered registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	l32i	a4, a2, PT_AREG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	l32i	a7, a2, PT_AREG7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	l32i	a8, a2, PT_AREG8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	l32i	a11, a2, PT_AREG11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	l32i	a12, a2, PT_AREG12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	l32i	a15, a2, PT_AREG15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	movi	a2, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	rfe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) .Linvalid_mask:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	/* We get here because of an unrecoverable error in the window
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	 * registers, so set up a dummy frame and kill the user application.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	movi	a0, 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	movi	a1, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	wsr	a0, windowstart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	wsr	a1, windowbase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	rsync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	movi	a0, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	rsr	a3, excsave1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	l32i	a1, a3, EXC_TABLE_KSTK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	movi	a4, PS_WOE_MASK | LOCKLEVEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	wsr	a4, ps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	rsync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	movi	a6, SIGSEGV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	call4	do_exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	/* shouldn't return, so panic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	wsr	a0, excsave1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	call0	unrecoverable_exception		# should not return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 1:	j	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) ENDPROC(fast_syscall_spill_registers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) /* Fixup handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)  * We get here if the spill routine causes an exception, e.g. tlb miss.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)  * We basically restore WINDOWBASE and WINDOWSTART to the condition when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)  * we entered the spill routine and jump to the user exception handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)  * Note that we only need to restore the bits in windowstart that have not
 * been spilled yet by the _spill_registers routine. Luckily, a3 contains a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)  * rotated windowstart with only those bits set for frames that haven't been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)  * spilled yet. Because a3 is rotated such that bit 0 represents the register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)  * frame for the current windowbase - 1, we need to rotate a3 left by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)  * value of the current windowbase + 1 and move it to windowstart.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)  * a0: value of depc, original value in depc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)  * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)  * a3: exctable, original value in excsave1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
ENTRY(fast_syscall_spill_registers_fixup)

	rsr	a2, windowbase	# get current windowbase (a2 is saved)
	xsr	a0, depc	# restore depc and a0
	ssl	a2		# set shift (32 - WB)

	/* We need to make sure the current registers (a0-a3) are preserved.
	 * To do this, we simply set the bit for the current window frame
	 * in WS, so that the exception handlers save them to the task stack.
	 *
	 * Note: we use a3 to set the windowbase, so we take a special care
	 * of it, saving it in the original _spill_registers frame across
	 * the exception handler call.
	 */

	xsr	a3, excsave1	# get spill-mask
	slli	a3, a3, 1	# shift left by one
	addi	a3, a3, 1	# set the bit for the current window frame

	/* Rotate the (WSBITS-wide) mask left by WB: replicate it across the
	 * word, then funnel-shift with SAR (= 32 - WB, set by ssl above).
	 */
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2	# a2 = xxwww1yyxxxwww1yy......
	wsr	a2, windowstart	# set corrected windowstart

	srli	a3, a3, 1	# drop the current-frame bit again
	rsr	a2, excsave1
	l32i	a2, a2, EXC_TABLE_DOUBLE_SAVE	# restore a2
	xsr	a2, excsave1	# a2 = exctable; excsave1 = saved a2
	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE	# save a3
	l32i	a3, a2, EXC_TABLE_PARAM	# original WB (in user task)
	xsr	a2, excsave1	# restore a2; excsave1 = exctable again

	/* Return to the original (user task) WINDOWBASE.
	 * We leave the following frame behind:
	 * a0, a1, a2	same
	 * a3:		trashed (saved in EXC_TABLE_DOUBLE_SAVE)
	 * depc:	depc (we have to return to that address)
	 * excsave_1:	exctable
	 */

	wsr	a3, windowbase
	rsync

	/* We are now in the original frame when we entered _spill_registers:
	 *  a0: return address
	 *  a1: used, stack pointer
	 *  a2: kernel stack pointer
	 *  a3: available
	 *  depc: exception address
	 *  excsave: exctable
	 * Note: This frame might be the same as above.
	 */

	/* Setup stack pointer. */

	addi	a2, a2, -PT_USER_SIZE
	s32i	a0, a2, PT_AREG0

	/* Make sure we return to this fixup handler. */

	movi	a3, fast_syscall_spill_registers_fixup_return
	s32i	a3, a2, PT_DEPC		# setup depc

	/* Jump to the exception handler. */

	rsr	a3, excsave1
	rsr	a0, exccause
	addx4	a0, a0, a3              	# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_USER     # load handler
	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE	# restore original a3
	jx	a0

ENDPROC(fast_syscall_spill_registers_fixup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 
ENTRY(fast_syscall_spill_registers_fixup_return)

	/* When we return here, all registers have been restored (a2: DEPC) */

	wsr	a2, depc		# exception address

	/* Restore fixup handler. */

	rsr	a2, excsave1
	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE	# stash a3; we need it as scratch
	movi	a3, fast_syscall_spill_registers_fixup
	s32i	a3, a2, EXC_TABLE_FIXUP	# re-arm the fixup handler
	rsr	a3, windowbase
	s32i	a3, a2, EXC_TABLE_PARAM	# record WB for the fixup handler
	l32i	a2, a2, EXC_TABLE_KSTK

	/* Load WB at the time the exception occurred. */

	rsr	a3, sar			# WB is still in SAR
	neg	a3, a3			# SAR = 32 - WB, so -SAR == WB (mod window size)
	wsr	a3, windowbase
	rsync

	rsr	a3, excsave1
	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE	# recover the stashed a3

	rfde				# resume at the double-exception address (DEPC)

ENDPROC(fast_syscall_spill_registers_fixup_return)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) #else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
ENTRY(fast_syscall_spill_registers)

	/* CONFIG_FAST_SYSCALL_SPILL_REGISTERS is disabled: restore the saved
	 * a0 and fail the syscall, returning -ENOSYS in a2.
	 */
	l32i    a0, a2, PT_AREG0        # restore a0
	movi	a2, -ENOSYS
	rfe

ENDPROC(fast_syscall_spill_registers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) #endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) #ifdef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  * We should never get here. Bail out!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
ENTRY(fast_second_level_miss_double_kernel)

1:
	call0	unrecoverable_exception		# should not return
1:	j	1b				# trap in place if it ever does

ENDPROC(fast_second_level_miss_double_kernel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) /* First-level entry handler for user, kernel, and double 2nd-level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)  * TLB miss exceptions.  Note that for now, user and kernel miss
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)  * exceptions share the same entry point and are handled identically.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)  * An old, less-efficient C version of this function used to exist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)  * We include it below, interleaved as comments, for reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)  * Entry condition:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)  *   a0:	trashed, original value saved on stack (PT_AREG0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)  *   a1:	a1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)  *   a2:	new stack pointer, original in DEPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)  *   a3:	a3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)  *   depc:	a2, original value saved on stack (PT_DEPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)  *   excsave_1:	dispatch table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)  *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 
ENTRY(fast_second_level_miss)

	/* Save a1 and a3. Note: we don't expect a double exception. */

	s32i	a1, a2, PT_AREG1
	s32i	a3, a2, PT_AREG3

	/* We need to map the page of PTEs for the user task.  Find
	 * the pointer to that page.  Also, it's possible for tsk->mm
	 * to be NULL while tsk->active_mm is nonzero if we faulted on
	 * a vmalloc address.  In that rare case, we must use
	 * active_mm instead to avoid a fault in this handler.  See
	 *
	 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
	 *   (or search Internet on "mm vs. active_mm")
	 *
	 *	if (!mm)
	 *		mm = tsk->active_mm;
	 *	pgd = pgd_offset (mm, regs->excvaddr);
	 *	pmd = pmd_offset (pgd, regs->excvaddr);
	 *	pmdval = *pmd;
	 */

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f			# mm == NULL: fall back to active_mm

8:	rsr	a3, excvaddr		# fault address
	_PGD_OFFSET(a0, a3, a1)
	l32i	a0, a0, 0		# read pmdval
	beqz	a0, 2f			# no mapping: default exception handling

	/* Read ptevaddr and convert to top of page-table page.
	 *
	 * 	vpnval = read_ptevaddr_register() & PAGE_MASK;
	 * 	vpnval += DTLB_WAY_PGTABLE;
	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
	 *	write_dtlb_entry (pteval, vpnval);
	 *
	 * The messy computation for 'pteval' above really simplifies
	 * into the following:
	 *
	 * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK)
	 *                 | PAGE_DIRECTORY
	 */

	movi	a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff
	add	a0, a0, a1		# pmdval - PAGE_OFFSET
	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
	xor	a0, a0, a1		# clear the low PAGE_SHIFT bits

	movi	a1, _PAGE_DIRECTORY
	or	a0, a0, a1		# ... | PAGE_DIRECTORY

	/*
	 * We utilize all three wired-ways (7-9) to hold pmd translations.
	 * Memory regions are mapped to the DTLBs according to bits 28 and 29.
	 * This allows to map the three most common regions to three different
	 * DTLBs:
	 *  0,1 -> way 7	program (0040.0000) and virtual (c000.0000)
	 *  2   -> way 8	shared libraries (2000.0000)
	 *  3   -> way 9	stack (3000.0000)
	 */

	extui	a3, a3, 28, 2		# addr. bit 28 and 29	0,1,2,3
	rsr	a1, ptevaddr
	addx2	a3, a3, a3		# ->			0,3,6,9
	srli	a1, a1, PAGE_SHIFT
	extui	a3, a3, 2, 2		# ->			0,0,1,2
	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
	addi	a3, a3, DTLB_WAY_PGD	# way 7, 8 or 9
	add	a1, a1, a3		# ... + way_number

3:	wdtlb	a0, a1			# install pmd translation
	dsync

	/* Exit critical section. */

4:	rsr	a3, excsave1
	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a0, a2, PT_AREG0
	l32i	a1, a2, PT_AREG1
	l32i	a3, a2, PT_AREG3
	l32i	a2, a2, PT_DEPC

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore excsave1 and return. */

	rsr	a2, depc
	rfe

	/* Return from double exception. */

1:	xsr	a2, depc
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	bnez	a0, 8b

	/* Even more unlikely case active_mm == 0.
	 * We can get here with NMI in the middle of context_switch that
	 * touches vmalloc area.
	 */
	movi	a0, init_mm
	j	8b

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

2:	/* Special case for cache aliasing.
	 * We (should) only get here if a clear_user_page, copy_user_page
	 * or the aliased cache flush functions got preemptively interrupted
	 * by another task. Re-establish temporary mapping to the
	 * TLBTEMP_BASE areas.
	 */

	/* We shouldn't be in a double exception */

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f

	/* Make sure the exception originated in the special functions */

	movi	a0, __tlbtemp_mapping_start
	rsr	a3, epc1
	bltu	a3, a0, 2f
	movi	a0, __tlbtemp_mapping_end
	bgeu	a3, a0, 2f

	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */

	movi	a3, TLBTEMP_BASE_1
	rsr	a0, excvaddr
	bltu	a0, a3, 2f

	addi	a1, a0, -TLBTEMP_SIZE
	bgeu	a1, a3, 2f

	/* Check if we have to restore an ITLB mapping. */

	movi	a1, __tlbtemp_mapping_itlb
	rsr	a3, epc1
	sub	a3, a3, a1		# a3 < 0: DTLB section, >= 0: ITLB section

	/* Calculate VPN */

	movi	a1, PAGE_MASK
	and	a1, a1, a0		# a1 = excvaddr & PAGE_MASK

	/* Jump for ITLB entry */

	bgez	a3, 1f

	/* We can use up to two TLBTEMP areas, one for src and one for dst. */

	extui	a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
	add	a1, a3, a1		# select first or second TLBTEMP area

	/* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */

	mov	a0, a6
	movnez	a0, a7, a3		# a0 = (a3 != 0) ? a7 : a6
	j	3b			# write the DTLB entry as usual

	/* ITLB entry. We only use dst in a6. */

1:	witlb	a6, a1
	isync
	j	4b			# skip wdtlb, exit critical section


#endif	// DCACHE_WAY_SIZE > PAGE_SIZE


2:	/* Invalid PGD, default exception handling */

	rsr	a1, depc
	s32i	a1, a2, PT_AREG2
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f	# user mode?
	call0	_kernel_exception
1:	call0	_user_exception

ENDPROC(fast_second_level_miss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)  * StoreProhibitedException
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)  * Update the pte and invalidate the itlb mapping for this pte.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)  * Entry condition:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)  *   a0:	trashed, original value saved on stack (PT_AREG0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)  *   a1:	a1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)  *   a2:	new stack pointer, original in DEPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)  *   a3:	a3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)  *   depc:	a2, original value saved on stack (PT_DEPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)  *   excsave_1:	dispatch table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)  *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) ENTRY(fast_store_prohibited)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	/* Save a1 and a3. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	s32i	a1, a2, PT_AREG1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	s32i	a3, a2, PT_AREG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	GET_CURRENT(a1,a2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	l32i	a0, a1, TASK_MM		# tsk->mm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	beqz	a0, 9f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 8:	rsr	a1, excvaddr		# fault address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	_PGD_OFFSET(a0, a1, a3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	l32i	a0, a0, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	beqz	a0, 2f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	 * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	 * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	_PTE_OFFSET(a0, a1, a3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	l32i	a3, a0, 0		# read pteval
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	movi	a1, _PAGE_CA_INVALID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	ball	a3, a1, 2f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	bbci.l	a3, _PAGE_WRITABLE_BIT, 2f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	or	a3, a3, a1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	rsr	a1, excvaddr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	s32i	a3, a0, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	/* We need to flush the cache if we have page coloring. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	dhwb	a0, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	pdtlb	a0, a1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	wdtlb	a3, a0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	/* Exit critical section. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	movi	a0, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	rsr	a3, excsave1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	s32i	a0, a3, EXC_TABLE_FIXUP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	/* Restore the working registers, and return. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	l32i	a3, a2, PT_AREG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	l32i	a1, a2, PT_AREG1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	l32i	a0, a2, PT_AREG0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	l32i	a2, a2, PT_DEPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	rsr	a2, depc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	rfe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	/* Double exception. Restore FIXUP handler and return. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 1:	xsr	a2, depc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	esync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	rfde
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	j	8b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 2:	/* If there was a problem, handle fault in C */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	rsr	a3, depc	# still holds a2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	s32i	a3, a2, PT_AREG2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	mov	a1, a2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	rsr	a2, ps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	bbsi.l	a2, PS_UM_BIT, 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	call0	_kernel_exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 1:	call0	_user_exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) ENDPROC(fast_store_prohibited)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) #endif /* CONFIG_MMU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	.text
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)  * System Calls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)  * void system_call (struct pt_regs* regs, int exccause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)  *                            a2                 a3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	.literal_position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) ENTRY(system_call)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	abi_entry_default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	/* regs->syscall = regs->areg[2] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	l32i	a7, a2, PT_AREG2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	s32i	a7, a2, PT_SYSCALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	GET_THREAD_INFO(a4, a1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	l32i	a3, a4, TI_FLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	movi	a4, _TIF_WORK_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	and	a3, a3, a4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	beqz	a3, 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	mov	a6, a2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	call4	do_syscall_trace_enter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	beqz	a6, .Lsyscall_exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	l32i	a7, a2, PT_SYSCALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	/* syscall = sys_call_table[syscall_nr] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	movi	a4, sys_call_table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	movi	a5, __NR_syscalls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	movi	a6, -ENOSYS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	bgeu	a7, a5, 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	addx4	a4, a7, a4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	l32i	a4, a4, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	/* Load args: arg0 - arg5 are passed via regs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	l32i	a6, a2, PT_AREG6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	l32i	a7, a2, PT_AREG3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	l32i	a8, a2, PT_AREG4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	l32i	a9, a2, PT_AREG5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	l32i	a10, a2, PT_AREG8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	l32i	a11, a2, PT_AREG9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	callx4	a4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 1:	/* regs->areg[2] = return_value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	s32i	a6, a2, PT_AREG2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	bnez	a3, 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) .Lsyscall_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	abi_ret_default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	mov	a6, a2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	call4	do_syscall_trace_leave
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	abi_ret_default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) ENDPROC(system_call)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)  * Spill live registers on the kernel stack macro.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)  * Entry condition: ps.woe is set, ps.excm is cleared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)  * Exit condition: windowstart has single bit set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)  * May clobber: a12, a13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	.macro	spill_registers_kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) #if XCHAL_NUM_AREGS > 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	call12	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	_j	2f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	retw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	.align	4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	_entry	a1, 48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	addi	a12, a0, 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) #if XCHAL_NUM_AREGS > 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	.rept	(XCHAL_NUM_AREGS - 32) / 12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	_entry	a1, 48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	mov	a12, a0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	.endr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	_entry	a1, 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) #if XCHAL_NUM_AREGS % 12 == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	mov	a8, a8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) #elif XCHAL_NUM_AREGS % 12 == 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	mov	a12, a12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) #elif XCHAL_NUM_AREGS % 12 == 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	mov	a4, a4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	retw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	mov	a12, a12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)  * Task switch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)  * struct task*  _switch_to (struct task* prev, struct task* next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)  *         a2                              a2                 a3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) ENTRY(_switch_to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	abi_entry(XTENSA_SPILL_STACK_RESERVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	mov	a11, a3			# and 'next' (a3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	l32i	a4, a2, TASK_THREAD_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	l32i	a5, a3, TASK_THREAD_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) #if THREAD_RA > 1020 || THREAD_SP > 1020
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	addi	a10, a2, TASK_THREAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	s32i	a0, a10, THREAD_RA - TASK_THREAD	# save return address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	s32i	a1, a10, THREAD_SP - TASK_THREAD	# save stack pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	s32i	a0, a2, THREAD_RA	# save return address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	s32i	a1, a2, THREAD_SP	# save stack pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	movi	a6, __stack_chk_guard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	l32i	a8, a3, TASK_STACK_CANARY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	s32i	a8, a6, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	/* Disable ints while we manipulate the stack pointer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	irq_save a14, a3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	rsync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	/* Switch CPENABLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	l32i	a3, a5, THREAD_CPENABLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	xsr	a3, cpenable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	s32i	a3, a4, THREAD_CPENABLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) #if XCHAL_HAVE_EXCLUSIVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	l32i	a3, a5, THREAD_ATOMCTL8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	getex	a3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	s32i	a3, a4, THREAD_ATOMCTL8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	/* Flush register file. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	spill_registers_kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	/* Set kernel stack (and leave critical section)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	 * Note: It's save to set it here. The stack will not be overwritten
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	 *       because the kernel stack will only be loaded again after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	 *       we return from kernel space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	rsr	a3, excsave1		# exc_table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	addi	a7, a5, PT_REGS_OFFSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	s32i	a7, a3, EXC_TABLE_KSTK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	/* restore context of the task 'next' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	l32i	a0, a11, THREAD_RA	# restore return address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	l32i	a1, a11, THREAD_SP	# restore stack pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	wsr	a14, ps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	rsync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	abi_ret(XTENSA_SPILL_STACK_RESERVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) ENDPROC(_switch_to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) ENTRY(ret_from_fork)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	/* void schedule_tail (struct task_struct *prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	 * Note: prev is still in a6 (return value from fake call4 frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	call4	schedule_tail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	mov	a6, a1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	call4	do_syscall_trace_leave
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	j	common_exception_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) ENDPROC(ret_from_fork)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)  * Kernel thread creation helper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)  * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)  *           left from _switch_to: a6 = prev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) ENTRY(ret_from_kernel_thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	call4	schedule_tail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	mov	a6, a3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	callx4	a2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	j	common_exception_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) ENDPROC(ret_from_kernel_thread)