^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * rtrap.S: Preparing for return from trap on Sparc V9.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <asm/asi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <asm/pstate.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <asm/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <asm/spitfire.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <asm/head.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <asm/visasm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #ifdef CONFIG_CONTEXT_TRACKING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) # define SCHEDULE_USER schedule_user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) # define SCHEDULE_USER schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) .text
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) .align 32
/* Reschedule slow path, entered from __handle_preemption_continue when
 * _TIF_NEED_RESCHED is set in the thread flags.  The wrpr at 661 sits in
 * the delay slot of the call, so PSTATE is rewritten before the scheduler
 * body runs; on M7 chips the patch below swaps in a variant that also
 * sets PSTATE.MCDE (see ADI comment).  Afterwards IRQs are masked again
 * and we loop back to redo every user-work check from the top.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) __handle_preemption:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) call SCHEDULE_USER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) 661: wrpr %g0, RTRAP_PSTATE, %pstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) /* If userspace is using ADI, it could potentially pass
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * a pointer with version tag embedded in it. To maintain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * the ADI security, we must re-enable PSTATE.mcde before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * we continue execution in the kernel for another thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) .section .sun_m7_1insn_patch, "ax"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) .word 661b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) wrpr %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) .previous
/* Delay slot of the branch masks IRQs before the checks are redone. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) ba,pt %xcc, __handle_preemption_continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
/* Flush pending user register windows (TI_WSAVED != 0) to the user
 * stack: %o0 = pt_regs, call fault_in_user_windows with the PSTATE
 * rewrite in the call's delay slot (M7-patched for ADI, as above),
 * then mask IRQs and redo the sched+signal checks from the top.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) __handle_user_windows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) add %sp, PTREGS_OFF, %o0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) call fault_in_user_windows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) 661: wrpr %g0, RTRAP_PSTATE, %pstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) /* If userspace is using ADI, it could potentially pass
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * a pointer with version tag embedded in it. To maintain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * the ADI security, we must re-enable PSTATE.mcde before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * we continue execution in the kernel for another thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) .section .sun_m7_1insn_patch, "ax"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) .word 661b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) wrpr %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) .previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) ba,pt %xcc, __handle_preemption_continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
/* Saved tstate (%l1) says FPU was enabled (TSTATE_PEF); check the live
 * %fprs.  If FPRS_FEF is clear the hardware FPU state is gone, so the
 * annulled delay slot (be,a) strips TSTATE_PEF from %l1 only on the
 * taken path; either way continue at __handle_userfpu_continue.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) __handle_userfpu:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) rd %fprs, %l5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) andcc %l5, FPRS_FEF, %g0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) sethi %hi(TSTATE_PEF), %o0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) be,a,pn %icc, __handle_userfpu_continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) andn %l1, %o0, %l1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) ba,a,pt %xcc, __handle_userfpu_continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
/* Pending-work path: call do_notify_resume(%o0 = pt_regs,
 * %o1 = %l5 (presumably orig_i0 for syscall restart — confirm against
 * do_notify_resume's signature), %o2 = %l0 = thread flags loaded at
 * __handle_preemption_continue).  The PSTATE rewrite rides in the
 * call's delay slot and gets the same M7/ADI patch treatment.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) __handle_signal:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) mov %l5, %o1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) add %sp, PTREGS_OFF, %o0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) mov %l0, %o2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) call do_notify_resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) 661: wrpr %g0, RTRAP_PSTATE, %pstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) /* If userspace is using ADI, it could potentially pass
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * a pointer with version tag embedded in it. To maintain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * the ADI security, we must re-enable PSTATE.mcde before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * we continue execution in the kernel for another thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) .section .sun_m7_1insn_patch, "ax"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) .word 661b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) wrpr %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) .previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) /* Signal delivery can modify pt_regs tstate, so we must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) * reload it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) */
/* Re-split the reloaded tstate: %l4 = bits 23:20 (the saved %pil,
 * later restored via "wrpr %l4, 0x0, %pil" at rt_continue), %l1 =
 * tstate with those bits cleared.  Then redo all user-work checks.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) sethi %hi(0xf << 20), %l4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) and %l1, %l4, %l4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) andn %l1, %l4, %l1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) ba,pt %xcc, __handle_preemption_continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) srl %l4, 20, %l4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) /* When returning from a NMI (%pil==15) interrupt we want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) * avoid running softirqs, doing IRQ tracing, preempting, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) */
/* NMI return entry: same tstate split as rtrap below (%l4 = saved %pil
 * from tstate bits 23:20, %l1 = tstate without them), but it jumps
 * straight past the TRACE_IRQFLAGS block to rtrap_no_irq_enable.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) .globl rtrap_nmi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) sethi %hi(0xf << 20), %l4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) and %l1, %l4, %l4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) andn %l1, %l4, %l1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) srl %l4, 20, %l4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) ba,pt %xcc, rtrap_no_irq_enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) /* Do not actually set the %pil here. We will do that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) * below after we clear PSTATE_IE in the %pstate register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) * If we re-enable interrupts here, we can recurse down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) * the hardirq stack potentially endlessly, causing a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) * stack overflow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
/* Main trap-return entry points.  On exit from this header:
 *   %l1 = saved tstate with the pil field (bits 23:20) masked out
 *   %l4 = saved %pil, restored just before the final retry
 *   %l3 = %l1 & TSTATE_PRIV, nonzero => trap came from kernel mode
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) .align 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) .globl rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) rtrap_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) rtrap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) /* mm/ultra.S:xcall_report_regs KNOWS about this load. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) rtrap_xcall:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) sethi %hi(0xf << 20), %l4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) and %l1, %l4, %l4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) andn %l1, %l4, %l1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) srl %l4, 20, %l4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) #ifdef CONFIG_TRACE_IRQFLAGS
/* Only trace hardirqs-on when the trapped context had %pil == 0. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) brnz,pn %l4, rtrap_no_irq_enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) call trace_hardirqs_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) /* Do not actually set the %pil here. We will do that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) * below after we clear PSTATE_IE in the %pstate register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) * If we re-enable interrupts here, we can recurse down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) * the hardirq stack potentially endlessly, causing a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) * stack overflow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) * It is tempting to put this test and trace_hardirqs_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) * call at the 'rt_continue' label, but that will not work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) * as that path hits unconditionally and we do not want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) * execute this in NMI return paths, for example.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) #endif
/* Returning to kernel skips all user-work handling; fall through
 * to to_user otherwise.  %l3 stays live down to rt_continue/kern_rtt.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) rtrap_no_irq_enable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) andcc %l1, TSTATE_PRIV, %l3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) bne,pn %icc, to_kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) /* We must hold IRQs off and atomically test schedule+signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * state, then hold them off all the way back to userspace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) * If we are returning to kernel, none of this matters. Note
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) * that we are disabling interrupts via PSTATE_IE, not using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) * %pil.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) * If we do not do this, there is a window where we would do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) * the tests, later the signal/resched event arrives but we do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) * not process it since we are still in kernel mode. It would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) * take until the next local IRQ before the signal/resched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) * event would be handled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) * This also means that if we have to deal with user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) * windows, we have to redo all of these sched+signal checks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) * with IRQs disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) to_user: wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) wrpr 0, %pil
/* Re-check loop head: every __handle_* helper branches back here with
 * IRQs masked.  %l0 = thread flags.  If no _TIF_USER_WORK_MASK bit is
 * set, go to user_nowork with the TSTATE_PEF test (delay-slot andcc)
 * already latched in the condition codes; otherwise dispatch to the
 * resched / signal / user-window handlers in priority order, using the
 * branch delay slots to pre-compute the next test.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) __handle_preemption_continue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) ldx [%g6 + TI_FLAGS], %l0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) sethi %hi(_TIF_USER_WORK_MASK), %o0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) or %o0, %lo(_TIF_USER_WORK_MASK), %o0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) andcc %l0, %o0, %g0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) sethi %hi(TSTATE_PEF), %o0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) be,pt %xcc, user_nowork
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) andcc %l1, %o0, %g0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) andcc %l0, _TIF_NEED_RESCHED, %g0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) bne,pn %xcc, __handle_preemption
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) bne,pn %xcc, __handle_signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) ldub [%g6 + TI_WSAVED], %o2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) brnz,pn %o2, __handle_user_windows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) nop
/* No work left: recompute the TSTATE_PEF condition codes and fall
 * straight into user_nowork below.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) sethi %hi(TSTATE_PEF), %o0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) andcc %l1, %o0, %g0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) /* This fpdepth clear is necessary for non-syscall rtraps only */
/* Condition codes on entry hold the TSTATE_PEF test: if set, validate
 * the live FPU state via __handle_userfpu; the delay slot always
 * clears TI_FPDEPTH on the way out to userspace.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) user_nowork:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) bne,pn %xcc, __handle_userfpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) stb %g0, [%g6 + TI_FPDEPTH]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) __handle_userfpu_continue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
/* Common exit: restore saved globals from pt_regs.  %l3 (TSTATE_PRIV
 * from the test at rtrap_no_irq_enable) is still live: zero means a
 * user return, so the per-cpu base reload is skipped; the delay slot
 * stashes the thread pointer in %l2 before %g6 gets clobbered.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) ldx [%sp + PTREGS_OFF + PT_V9_G2], %g2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) ldx [%sp + PTREGS_OFF + PT_V9_G3], %g3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) ldx [%sp + PTREGS_OFF + PT_V9_G4], %g4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) ldx [%sp + PTREGS_OFF + PT_V9_G5], %g5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) brz,pt %l3, 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) mov %g6, %l2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) /* Must do this before thread reg is clobbered below. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) ldx [%sp + PTREGS_OFF + PT_V9_G6], %g6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) ldx [%sp + PTREGS_OFF + PT_V9_G7], %g7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) /* Normal globals are restored, go to trap globals. */
/* sun4v has no alternate-globals PSTATE bit; the patch replaces the
 * AG write with a plain IRQ-off write plus SET_GL(1).
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 661: wrpr %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) .section .sun4v_2insn_patch, "ax"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) .word 661b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) SET_GL(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) .previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
/* Re-establish the thread pointer in the trap-global set. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) mov %l2, %g6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) ldx [%sp + PTREGS_OFF + PT_V9_I6], %i6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) ldx [%sp + PTREGS_OFF + PT_V9_I7], %i7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) ldx [%sp + PTREGS_OFF + PT_V9_TPC], %l2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %o2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
/* Restore %y, the trapped %pil (%l4, split off at rtrap entry), and
 * set up trap level 1 state for the final retry.  TSTATE_SYSCALL is
 * stripped so a restarted syscall flag does not leak back.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) ld [%sp + PTREGS_OFF + PT_V9_Y], %o3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) wr %o3, %g0, %y
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) wrpr %l4, 0x0, %pil
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) wrpr %g0, 0x1, %tl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) andn %l1, TSTATE_SYSCALL, %l1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) wrpr %l1, %g0, %tstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) wrpr %l2, %g0, %tpc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) wrpr %o2, %g0, %tnpc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
/* Kernel return takes the short kern_rtt path; user return continues
 * below to restore the MMU context and register windows.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) brnz,pn %l3, kern_rtt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) mov PRIMARY_CONTEXT, %l7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
/* Read the context MMU register (sun4v uses ASI_MMU via patch), OR in
 * the kernel nucleus page-size bits, and write it back, flushing
 * afterwards so the change takes effect before the retry.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 661: ldxa [%l7 + %l7] ASI_DMMU, %l0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) .section .sun4v_1insn_patch, "ax"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) .word 661b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) ldxa [%l7 + %l7] ASI_MMU, %l0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) .previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) sethi %hi(sparc64_kern_pri_nuc_bits), %l1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) or %l0, %l1, %l0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 661: stxa %l0, [%l7] ASI_DMMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) .section .sun4v_1insn_patch, "ax"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) .word 661b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) stxa %l0, [%l7] ASI_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) .previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) sethi %hi(KERNBASE), %l7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) flush %l7
/* Window bookkeeping: %l1 = user wstate (wstate >> 3), %l2 = otherwin.
 * Chips with fast window control get the raw "normalw" opcode patched
 * in instead of the canrestore/otherwin writes.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) rdpr %wstate, %l1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) rdpr %otherwin, %l2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) srl %l1, 3, %l1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 661: wrpr %l2, %g0, %canrestore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) .section .fast_win_ctrl_1insn_patch, "ax"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) .word 661b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) .word 0x89880000 ! normalw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) .previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) wrpr %l1, %g0, %wstate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) brnz,pt %l2, user_rtt_restore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 661: wrpr %g0, %g0, %otherwin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) .section .fast_win_ctrl_1insn_patch, "ax"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) .word 661b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) .previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
/* No window to restore from: step back one window (cwp - 1, in the
 * branch delay slot) and fill it from the user stack, picking the
 * 32-bit or 64-bit fill routine from _TIF_32BIT.  ASI_AIUP makes the
 * fill loads use the user's primary address space.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) ldx [%g6 + TI_FLAGS], %g3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) wr %g0, ASI_AIUP, %asi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) rdpr %cwp, %g1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) andcc %g3, _TIF_32BIT, %g0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) sub %g1, 1, %g1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) bne,pt %xcc, user_rtt_fill_32bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) wrpr %g1, %cwp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) ba,a,pt %xcc, user_rtt_fill_64bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
/* Fault fixup trampolines for the window fill above: each loads a
 * fault-type code into %g3 (1 = data access exception, 2 = memory
 * address not aligned, 0 = normal fault) and funnels into
 * user_rtt_fill_fixup_common (defined elsewhere).
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) user_rtt_fill_fixup_dax:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) ba,pt %xcc, user_rtt_fill_fixup_common
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) mov 1, %g3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) user_rtt_fill_fixup_mna:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) ba,pt %xcc, user_rtt_fill_fixup_common
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) mov 2, %g3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) user_rtt_fill_fixup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) ba,pt %xcc, user_rtt_fill_fixup_common
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) clr %g3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
/* The fill stepped %cwp back by one; undo that before restoring. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) user_rtt_pre_restore:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) add %g1, 1, %g1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) wrpr %g1, 0x0, %cwp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296)
/* Pop the trap frame's window, mark all restorable windows clean
 * (cleanwin = canrestore), and return to userspace via retry.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) user_rtt_restore:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) restore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) rdpr %canrestore, %g1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) wrpr %g1, 0x0, %cleanwin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) retry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
/* Return-to-kernel: if no window can be restored, refill one first
 * via kern_rtt_fill (defined elsewhere).  Otherwise invalidate the
 * pt_regs frame by zeroing its magic word, pop the window, retry.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) kern_rtt: rdpr %canrestore, %g1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) brz,pn %g1, kern_rtt_fill
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) kern_rtt_restore:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) stw %g0, [%sp + PTREGS_OFF + PT_V9_MAGIC]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) restore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) retry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
/* Kernel-return preemption check (CONFIG_PREEMPTION only).  Skip
 * preemption when preempt_count is nonzero, when TIF_NEED_RESCHED is
 * clear, or when the trapped context ran with %pil != 0 (%l4 holds the
 * saved pil split out at rtrap entry).  Otherwise call
 * preempt_schedule_irq and restart the whole return sequence.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) to_kernel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) #ifdef CONFIG_PREEMPTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) ldsw [%g6 + TI_PRE_COUNT], %l5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) brnz %l5, kern_fpucheck
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) ldx [%g6 + TI_FLAGS], %l5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) andcc %l5, _TIF_NEED_RESCHED, %g0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) be,pt %xcc, kern_fpucheck
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) cmp %l4, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) bne,pn %xcc, kern_fpucheck
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) call preempt_schedule_irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) ba,pt %xcc, rtrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) #endif
/* Restore FPU state saved by an in-kernel FPU user before returning.
 * %l5 = TI_FPDEPTH (0 => nothing to do, go straight to rt_continue);
 * %o0 = fpdepth >> 1 indexes the per-level save slots, and %l2 is the
 * saved %fprs byte for this level from TI_FPSAVED.  fpdepth is
 * decremented by 2 and written back on every exit path.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) brz,pt %l5, rt_continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) srl %l5, 1, %o0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) add %g6, TI_FPSAVED, %l6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) ldub [%l6 + %o0], %l2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) sub %l5, 2, %l5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
/* Dispatch on the saved %fprs flags: nothing dirty => 2f (just store
 * the new fpdepth); FEF clear but DU set => 5f (upper half only);
 * otherwise full restore of GSR, FSR, and the dirty register banks.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) add %g6, TI_GSR, %o1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) andcc %l2, (FPRS_FEF|FPRS_DU), %g0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) be,pt %icc, 2f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) and %l2, FPRS_DL, %l6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) andcc %l2, FPRS_FEF, %g0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) be,pn %icc, 5f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) sll %o0, 3, %o5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) rd %fprs, %g1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
/* %o5 = 8-byte slot offset (GSR/XFSR), %o2 = 256-byte slot offset
 * into TI_FPREGS.  Lower bank (%f0-%f31) is reloaded only when
 * FPRS_DL was set (%l6 nonzero).
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) wr %g1, FPRS_FEF, %fprs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) ldx [%o1 + %o5], %g1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) add %g6, TI_XFSR, %o1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) sll %o0, 8, %o2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) add %g6, TI_FPREGS, %o3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) brz,pn %l6, 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) add %g6, TI_FPREGS+0x40, %o4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) membar #Sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) ldda [%o3 + %o2] ASI_BLK_P, %f0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) ldda [%o4 + %o2] ASI_BLK_P, %f16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) membar #Sync
/* Upper bank (%f32-%f62) reloaded only when FPRS_DU was set; GSR is
 * restored in the branch delay slot either way.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 1: andcc %l2, FPRS_DU, %g0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) be,pn %icc, 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) wr %g1, 0, %gsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) add %o2, 0x80, %o2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) membar #Sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) ldda [%o3 + %o2] ASI_BLK_P, %f32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) ldda [%o4 + %o2] ASI_BLK_P, %f48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 1: membar #Sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) ldx [%o1 + %o5], %fsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 2: stb %l5, [%g6 + TI_FPDEPTH]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) ba,pt %xcc, rt_continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) nop
/* FEF was clear, DU set: restore only %f32-%f62 from the upper-half
 * save area and leave FPRS_DU set; fpdepth store rides the delay slot.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 5: wr %g0, FPRS_FEF, %fprs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) sll %o0, 8, %o2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) add %g6, TI_FPREGS+0x80, %o3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) add %g6, TI_FPREGS+0xc0, %o4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) membar #Sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) ldda [%o3 + %o2] ASI_BLK_P, %f32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) ldda [%o4 + %o2] ASI_BLK_P, %f48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) membar #Sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) wr %g0, FPRS_DU, %fprs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) ba,pt %xcc, rt_continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) stb %l5, [%g6 + TI_FPDEPTH]