/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <asm/cache.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/code-patching-asm.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/*
 * System calls.
 */
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

#ifdef CONFIG_COMPAT
COMPAT_SYS_CALL_TABLE:
	.tc compat_sys_call_table[TC],compat_sys_call_table
#endif

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

#ifdef CONFIG_PPC_BOOK3S
.macro system_call_vectored name trapnr
	.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG)	/* transaction active? */
	bne	.Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	SCV_INTERRUPT_TO_KERNEL
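	/*
	 * Switch to this CPU's kernel stack: PACAKSAVE(r13) holds the
	 * kernel stack pointer saved when we last exited to userspace.
	 * The user r1 goes into the back chain and into GPR1 of pt_regs.
	 */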
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
	ld	r2,PACATOC(r13)
	mfcr	r12
	li	r11,0
	/* Can we avoid saving r3-r8 in common case? */
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero r9-r12, this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_LINK(r1)
	std	r11,_CTR(r1)

	li	r11,\trapnr
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	std	r3,ORIG_GPR3(r1)
	addi	r10,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */
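	/*
	 * The marker sits just below the pt_regs area and lets the stack
	 * unwinder recognise this frame as an exception frame.
	 */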

BEGIN_FTR_SECTION
	HMT_MEDIUM
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
	 * would clobber syscall parameters. Also we always enter with IRQs
	 * enabled and nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 *
	 * scv enters with MSR[EE]=1, so don't set PACA_IRQ_HARD_DIS. The
	 * entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED.
	 */

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr	r9,r0
	bl	system_call_exception

.Lsyscall_vectored_\name\()_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,1 /* scv */
	bl	syscall_exit_prepare
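	/*
	 * r3 is nonzero if the full register state must be restored, e.g.
	 * when a tracer or signal handler may have modified the frame.
	 */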

	ld	r2,_CCR(r1)
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
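	/*
	 * On CPUs whose stcx. does not check the reservation address, a
	 * dummy stdcx. to the stack kills any reservation left by the
	 * interrupted context, so a later userspace stcx. cannot falsely
	 * succeed.
	 */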

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	cmpdi	r3,0
	bne	.Lsyscall_vectored_\name\()_restore_regs

	/* rfscv returns with LR->NIA and CTR->MSR */
	mtlr	r4
	mtctr	r5

	/* Could zero these as per ABI, but we may consider a stricter ABI
	 * which preserves these if libc implementations can benefit, so
	 * restore them for now until further measurement is done. */
	ld	r0,GPR0(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	/* Zero volatile regs that may contain sensitive kernel data */
	li	r9,0
	li	r10,0
	li	r11,0
	li	r12,0
	mtspr	SPRN_XER,r0

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r13,GPR13(r1)
	ld	r1,GPR1(r1)
	RFSCV_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_vectored_\name\()_restore_regs:
	li	r3,0
	mtmsrd	r3,1
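	/*
	 * mtmsrd L=1 updates only EE and RI, so both are now clear: SRR0/1
	 * can be loaded without an interrupt clobbering them.
	 */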
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5

	ld	r3,_CTR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_XER(r1)

	REST_NVGPRS(r1)
	ld	r0,GPR0(r1)
	mtcr	r2
	mtctr	r3
	mtlr	r4
	mtspr	SPRN_XER,r5
	REST_10GPRS(2, r1)
	REST_2GPRS(12, r1)
	ld	r1,GPR1(r1)
	RFI_TO_USER
.endm

system_call_vectored common 0x3000
/*
 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
 * which is tested by system_call_exception when r0 is -1 (as set by vector
 * entry code).
 */
system_call_vectored sigill 0x7ff0


/*
 * Entered via kernel return set up by lib/sstep.c, must match entry regs
 */
	.globl system_call_vectored_emulate
system_call_vectored_emulate:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	b	system_call_vectored_common
#endif

	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG)	/* transaction active? */
	bne	.Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	ld	r2,PACATOC(r13)
	mfcr	r12
	li	r11,0
	/* Can we avoid saving r3-r8 in common case? */
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero r9-r12, this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	mflr	r10

	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r12,r11,28,(63-28)
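	/* r11 is zero here, so this inserts a 0 over the SO bit position */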
	li	r11,0xc00
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	std	r3,ORIG_GPR3(r1)
	addi	r10,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */

	/*
	 * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
	 * would clobber syscall parameters. Also we always enter with IRQs
	 * enabled and nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 */
	li	r11,IRQS_ALL_DISABLED
	li	r12,PACA_IRQ_HARD_DIS
	stb	r11,PACAIRQSOFTMASK(r13)
	stb	r12,PACAIRQHAPPENED(r13)
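	/*
	 * The sc vector cleared MSR[EE] on entry, so record the hard
	 * disable in PACAIRQHAPPENED (contrast with scv above, which
	 * enters with EE=1).
	 */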

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr	r9,r0
	bl	system_call_exception

.Lsyscall_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,0 /* !scv */
	bl	syscall_exit_prepare

	ld	r2,_CCR(r1)
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)
	ld	r6,_LINK(r1)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5
	mtlr	r6

	cmpdi	r3,0
	bne	.Lsyscall_restore_regs
	/* Zero volatile regs that may contain sensitive kernel data */
	li	r0,0
	li	r4,0
	li	r5,0
	li	r6,0
	li	r7,0
	li	r8,0
	li	r9,0
	li	r10,0
	li	r11,0
	li	r12,0
	mtctr	r0
	mtspr	SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r13,GPR13(r1)
	ld	r1,GPR1(r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_restore_regs:
	ld	r3,_CTR(r1)
	ld	r4,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r3
	mtspr	SPRN_XER,r4
	ld	r0,GPR0(r1)
	REST_8GPRS(4, r1)
	ld	r12,GPR12(r1)
	b	.Lsyscall_restore_regs_cont

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
.Ltabort_syscall:
	/* Firstly we need to enable TM in the kernel */
	mfmsr	r10
	li	r9, 1
	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r10, 0

	/* tabort, this dooms the transaction, nothing else */
	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
	TABORT(R9)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
	li	r9, MSR_RI
	andc	r10, r10, r9
	mtmsrd	r10, 1
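	/*
	 * MSR[RI] is now clear: SRR0/1 hold the user context, and taking
	 * another interrupt here would be unrecoverable.
	 */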
	mtspr	SPRN_SRR0, r11
	mtspr	SPRN_SRR1, r12
	RFI_TO_USER
	b	.	/* prevent speculative execution */
#endif

#ifdef CONFIG_PPC_BOOK3S
_GLOBAL(ret_from_fork_scv)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_vectored_common_exit
#endif

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
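	/*
	 * copy_thread() places the thread function in r14 and its
	 * argument in r15 for a new kernel thread.
	 */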
	mtctr	r14
	mr	r3,r15
#ifdef PPC64_ELF_ABI_v2
	mr	r12,r14
#endif
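	/* ELFv2 ABI: the callee expects its entry address in r12 to derive its TOC */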
	bctrl
	li	r3,0
	b	.Lsyscall_exit

#ifdef CONFIG_PPC_BOOK3E
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
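	/*
	 * The low bit of the saved trap value means "NVGPRs not yet
	 * saved": return if it is already clear, otherwise save them
	 * and clear the bit.
	 */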
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr
_ASM_NOKPROBE_SYMBOL(save_nvgprs);
#endif

#ifdef CONFIG_PPC_BOOK3S_64

#define FLUSH_COUNT_CACHE	\
1:	nop;			\
	patch_site 1b, patch__call_flush_branch_caches1; \
1:	nop;			\
	patch_site 1b, patch__call_flush_branch_caches2; \
1:	nop;			\
	patch_site 1b, patch__call_flush_branch_caches3
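/*
 * The three nops above are patch sites: when branch-cache flushing is
 * enabled at boot, each nop is patched into a call to flush_branch_caches
 * below.
 */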

.macro nops number
	.rept \number
	nop
	.endr
.endm

	.balign 32
.global flush_branch_caches
flush_branch_caches:
	/* Save LR into r9 */
	mflr	r9

	// Flush the link stack
	.rept 64
	bl	.+4
	.endr
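	// Each bl above pushes an entry onto the hardware link stack
	// (return-address predictor); 64 of them displace any previously
	// trained entries.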
	b	1f
	nops	6

	.balign 32
	/* Restore LR */
1:	mtlr	r9

	// If we're just flushing the link stack, return here
3:	nop
	patch_site 3b patch__flush_link_stack_return

	li	r9,0x7fff
	mtctr	r9
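	// Software count-cache flush: ctr is preloaded above so the
	// specially-formed bcctr instructions below decrement it without
	// ever branching; falling through the long 32-byte-aligned chain
	// displaces previously trained entries from the count cache.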

	PPC_BCCTR_FLUSH

2:	nop
	patch_site 2b patch__flush_count_cache_return

	nops	3

	.rept 278
	.balign 32
	PPC_BCCTR_FLUSH
	nops	7
	.endr

	blr
#else
#define FLUSH_COUNT_CACHE
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via interrupt_return.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	std	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

	kuap_check_amr r9, r10

	FLUSH_COUNT_CACHE	/* Clobbers r9, ctr */

	/*
	 * On SMP kernels, care must be taken because a task may be
	 * scheduled off CPUx and on to CPUy. Memory ordering must be
	 * considered.
	 *
	 * Cacheable stores on CPUx will be visible when the task is
	 * scheduled on CPUy by virtue of the core scheduler barriers
	 * (see "Notes on Program-Order guarantees on SMP systems." in
	 * kernel/sched/core.c).
	 *
	 * Uncacheable stores in the case of involuntary preemption must
	 * be taken care of. The smp_mb__after_spinlock() in __schedule()
	 * is implemented as hwsync on powerpc, which orders MMIO too. So
	 * long as there is an hwsync in the context switch path, it will
	 * be executed on the source CPU after the task has performed
	 * all MMIO ops on that CPU, and on the destination CPU before the
	 * task performs any MMIO ops there.
	 */

	/*
	 * The kernel context switch path must contain a spin_lock,
	 * which contains larx/stcx, which will clear any reservation
	 * of the task being switched.
	 */
#ifdef CONFIG_PPC_BOOK3S
	/* Cancel all explicit user streams as they will have no use after context
	 * switch and will stop the HW from creating streams itself
	 */
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
#endif

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */
#if defined(CONFIG_STACKPROTECTOR)
	ld	r6, TASK_CANARY(r6)
	std	r6, PACA_CANARY(r13)
#endif

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_MMU_FTR_SECTION
	b	2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */
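	/*
	 * A kernel-linear (c000...) stack segment is bolted into the SLB
	 * at boot, and a stack in the same segment as the current one is
	 * already resident, so neither needs an SLB update.
	 */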

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB. No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */
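	/*
	 * The ordering above is deliberate: clearing the ESID first marks
	 * the shadow entry invalid while the VSID is rewritten, so the
	 * hypervisor never sees a valid but inconsistent entry.
	 */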

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	isync
	slbie	r6
BEGIN_FTR_SECTION
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S_64 */

	clrrdi	r7, r8, THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	/*
	 * PMU interrupts in radix may come in here. They will use r1, not
	 * PACAKSAVE, so this stack switch will not cause a problem. They
	 * will store to the process stack, which may then be migrated to
	 * another CPU. However the rq lock release on this CPU paired with
	 * the rq lock acquire on the new CPU before the stack becomes
	 * active on the new CPU, will order those stores.
	 */
	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_NVGPRS(r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

#ifdef CONFIG_PPC_BOOK3S
	/*
	 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
	 * touched, no exit work created, then this can be used.
	 */
	.balign IFETCH_ALIGN_BYTES
	.globl fast_interrupt_return
fast_interrupt_return:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
	kuap_check_amr r3, r4
	ld	r5,_MSR(r1)
	andi.	r0,r5,MSR_PR
	bne	.Lfast_user_interrupt_return
	kuap_restore_amr r3, r4
	andi.	r0,r5,MSR_RI
	li	r3,0	/* 0 return value, no EMULATE_STACK_STORE */
	bne+	.Lfast_kernel_interrupt_return
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.	/* should not get here */

	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return
interrupt_return:
_ASM_NOKPROBE_SYMBOL(interrupt_return)
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_user_prepare
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs
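	/*
	 * Nonzero r3 from interrupt_exit_user_prepare means the
	 * non-volatile GPRs must be reloaded from the frame.
	 */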

.Lfast_user_interrupt_return:
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
BEGIN_FTR_SECTION
	ld	r10,_PPR(r1)
	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	li	r0,0

	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)
	REST_GPR(13, r1)
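	/* r13 now holds the user value (TLS pointer) instead of the PACA */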

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_4GPRS(2, r1)
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return

	.balign IFETCH_ALIGN_BYTES
.Lkernel_interrupt_return:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
	cmpdi	cr1,r3,0
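	/*
	 * Nonzero r3 means a stack store must be emulated (see 1: below).
	 * Test into cr1, because cr0 and r3 are clobbered by the restores.
	 */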
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) ld r11,_NIP(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) ld r12,_MSR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) mtspr SPRN_SRR0,r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) mtspr SPRN_SRR1,r12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) BEGIN_FTR_SECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) stdcx. r0,0,r1 /* to clear the reservation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) FTR_SECTION_ELSE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) ldarx r0,0,r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) ld r3,_LINK(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) ld r4,_CTR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) ld r5,_XER(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) ld r6,_CCR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) li r0,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) REST_4GPRS(7, r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) REST_2GPRS(11, r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) mtlr r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) mtctr r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) mtspr SPRN_XER,r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * Leaving a stale exception_marker on the stack can confuse
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * the reliable stack unwinder later on. Clear it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) std r0,STACK_FRAME_OVERHEAD-16(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) REST_4GPRS(2, r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) bne- cr1,1f /* emulate stack store */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) mtcr r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) REST_GPR(6, r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) REST_GPR(0, r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) REST_GPR(1, r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) RFI_TO_KERNEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) b . /* prevent speculative execution */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 1: /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * Emulate stack store with update. New r1 value was already calculated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * and updated in our interrupt regs by emulate_loadstore, but we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * store the previous value of r1 to the stack before re-loading our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * registers from it, otherwise they could be clobbered. Use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * PACA_EXGEN as temporary storage to hold the store data, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * interrupts are disabled here so it won't be clobbered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) mtcr r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) std r9,PACA_EXGEN+0(r13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) addi r9,r1,INT_FRAME_SIZE /* get original r1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) REST_GPR(6, r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) REST_GPR(0, r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) REST_GPR(1, r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) std r9,0(r1) /* perform store component of stdu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) ld r9,PACA_EXGEN+0(r13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) RFI_TO_KERNEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) b . /* prevent speculative execution */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) #endif /* CONFIG_PPC_BOOK3S */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) #ifdef CONFIG_PPC_RTAS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * called with the MMU off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * In addition, we need to be in 32b mode, at least for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) * Note: r3 is an input parameter to rtas, so don't trash it...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) _GLOBAL(enter_rtas)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) mflr r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) std r0,16(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) /* Because RTAS is running in 32b mode, it clobbers the high order half
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * of all registers that it saves. We therefore save those registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) SAVE_GPR(2, r1) /* Save the TOC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) SAVE_GPR(13, r1) /* Save paca */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) SAVE_NVGPRS(r1) /* Save the non-volatiles */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) mfcr r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) std r4,_CCR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) mfctr r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) std r5,_CTR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) mfspr r6,SPRN_XER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) std r6,_XER(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) mfdar r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) std r7,_DAR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) mfdsisr r8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) std r8,_DSISR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /* Temporary workaround to clear CR until RTAS can be modified to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * ignore all bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) li r0,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) mtcr r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) #ifdef CONFIG_BUG
	/* It is never acceptable to get here with interrupts enabled;
	 * check it with the asm equivalent of WARN_ON.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) lbz r0,PACAIRQSOFTMASK(r13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 1: tdeqi r0,IRQS_ENABLED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) #endif
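	/*
	 * The tdeqi above traps iff the soft-mask state equals
	 * IRQS_ENABLED; the bug entry marks the trap site so it is
	 * reported as a warning rather than a fatal BUG.
	 */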
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /* Hard-disable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) mfmsr r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) rldicl r7,r6,48,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) rotldi r7,r7,16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) mtmsrd r7,1
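	/*
	 * The two rotates add up to a full 64-bit rotation, so their only
	 * net effect is that the rldicl mask clears MSR_EE while it sits
	 * in bit 0. mtmsrd with L=1 then updates just EE (and RI, which
	 * is unchanged here).
	 */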
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA, which allows us to restore
	 * our original state after RTAS returns.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) std r1,PACAR1(r13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) std r6,PACASAVEDMSR(r13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
	/* Set up our real-mode return addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) LOAD_REG_ADDR(r4,rtas_return_loc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) clrldi r4,r4,2 /* convert to realmode address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) mtlr r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) li r0,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) andc r0,r6,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) li r9,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) andc r6,r0,r9
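	/*
	 * r0 is the transition MSR: the current MSR with EE, SE, BE and
	 * RI cleared. r6 additionally drops SF, IR, DR, the FP bits and
	 * LE, giving the 32-bit big-endian MMU-off environment RTAS
	 * expects.
	 */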
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) __enter_rtas:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) sync /* disable interrupts so SRR0/1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) mtmsrd r0 /* don't get trashed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) LOAD_REG_ADDR(r4, rtas)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) ld r5,RTASENTRY(r4) /* get the rtas->entry value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) ld r4,RTASBASE(r4) /* get the rtas->base value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) mtspr SPRN_SRR0,r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) mtspr SPRN_SRR1,r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) RFI_TO_KERNEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) b . /* prevent speculative execution */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) rtas_return_loc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) FIXUP_ENDIAN
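	/*
	 * FIXUP_ENDIAN is a no-op when we come back in the native
	 * endianness and flips MSR_LE if RTAS returned in the other one.
	 */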
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * Clear RI and set SF before anything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) mfmsr r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) li r0,MSR_RI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) andc r6,r6,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) sldi r0,r0,(MSR_SF_LG - MSR_RI_LG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) or r6,r6,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) mtmsrd r6
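	/*
	 * r0 is reused: first as MSR_RI to clear RI, then shifted up by
	 * (MSR_SF_LG - MSR_RI_LG) so the same constant becomes MSR_SF
	 * and switches us back to 64-bit mode.
	 */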
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) /* relocation is off at this point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) GET_PACA(r4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) clrldi r4,r4,2 /* convert to realmode address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) bcl 20,31,$+4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) 0: mflr r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */
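	/*
	 * The bcl 20,31,$+4 idiom reads the current address into LR (this
	 * form is defined not to pollute the link stack predictor), so
	 * 1f-0b is the offset from 0b to the .8byte literal holding
	 * &rtas_restore_regs.
	 */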
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) ld r1,PACAR1(r4) /* Restore our SP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) mtspr SPRN_SRR0,r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) mtspr SPRN_SRR1,r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) RFI_TO_KERNEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) b . /* prevent speculative execution */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) _ASM_NOKPROBE_SYMBOL(__enter_rtas)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) _ASM_NOKPROBE_SYMBOL(rtas_return_loc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) .align 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) 1: .8byte rtas_restore_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) rtas_restore_regs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) /* relocation is on at this point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) REST_GPR(2, r1) /* Restore the TOC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) REST_GPR(13, r1) /* Restore paca */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) REST_NVGPRS(r1) /* Restore the non-volatiles */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) GET_PACA(r13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) ld r4,_CCR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) mtcr r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) ld r5,_CTR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) mtctr r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) ld r6,_XER(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) mtspr SPRN_XER,r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) ld r7,_DAR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) mtdar r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) ld r8,_DSISR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) mtdsisr r8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) addi r1,r1,SWITCH_FRAME_SIZE /* Unstack our frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) ld r0,16(r1) /* get return address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) mtlr r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) blr /* return to caller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) #endif /* CONFIG_PPC_RTAS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) _GLOBAL(enter_prom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) mflr r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) std r0,16(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
	/* PROM runs in 32-bit mode and therefore only preserves the low
	 * 32 bits of the registers it saves. Save to the stack every
	 * register PROM might touch. (r0, r3-r13 are caller saved)
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) SAVE_GPR(2, r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) SAVE_GPR(13, r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) SAVE_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) mfcr r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) mfmsr r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) std r10,_CCR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) std r11,_MSR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) /* Put PROM address in SRR0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) mtsrr0 r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
	/* Set up our trampoline return addr in LR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) bcl 20,31,$+4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) 0: mflr r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) addi r4,r4,(1f - 0b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) mtlr r4
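	/*
	 * Same bcl 20,31,$+4 trick as in enter_rtas: LR now points at
	 * label 1 below, which is where OF returns to via blr.
	 */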
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
	/* Prepare a 32-bit mode big endian MSR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) #ifdef CONFIG_PPC_BOOK3E
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) rlwinm r11,r11,0,1,31
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) mtsrr1 r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) rfi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) #else /* CONFIG_PPC_BOOK3E */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) andc r11,r11,r12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) mtsrr1 r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) RFI_TO_KERNEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) #endif /* CONFIG_PPC_BOOK3E */
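	/*
	 * Book3E: the rlwinm zeroes the upper word of the MSR, including
	 * MSR_CM, dropping to 32-bit mode. Book3S clears SF, ISF and LE
	 * explicitly. Either way we enter Open Firmware 32-bit and
	 * big-endian.
	 */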
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) 1: /* Return from OF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) FIXUP_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
	/* Make sure that the top 32 bits of r1 didn't get
	 * corrupted by OF
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) rldicl r1,r1,0,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /* Restore the MSR (back to 64 bits) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ld r0,_MSR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) MTMSRD(r0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) isync
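	/*
	 * MTMSRD restores the MSR saved on entry (64-bit mode again);
	 * the isync ensures the mode switch has fully taken effect
	 * before the restores below.
	 */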
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) /* Restore other registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) REST_GPR(2, r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) REST_GPR(13, r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) REST_NVGPRS(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) ld r4,_CCR(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) mtcr r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) addi r1,r1,SWITCH_FRAME_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) ld r0,16(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) mtlr r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) blr