^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * S390 low-level entry points.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright IBM Corp. 1999, 2012
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Hartmut Penner (hp@de.ibm.com),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Heiko Carstens <heiko.carstens@de.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/linkage.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <asm/alternative-asm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <asm/cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <asm/ctl_reg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <asm/dwarf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <asm/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <asm/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <asm/thread_info.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <asm/asm-offsets.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <asm/unistd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <asm/sigp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <asm/vx-insn.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <asm/setup.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <asm/nmi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <asm/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <asm/nospec-insn.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
# Byte offsets of the individual GPR save slots inside struct pt_regs:
# __PT_GPRS is the start of the gprs[] array, one 8-byte slot per register.
__PT_R0 = __PT_GPRS
__PT_R1 = __PT_GPRS + 8
__PT_R2 = __PT_GPRS + 16
__PT_R3 = __PT_GPRS + 24
__PT_R4 = __PT_GPRS + 32
__PT_R5 = __PT_GPRS + 40
__PT_R6 = __PT_GPRS + 48
__PT_R7 = __PT_GPRS + 56
__PT_R8 = __PT_GPRS + 64
__PT_R9 = __PT_GPRS + 72
__PT_R10 = __PT_GPRS + 80
__PT_R11 = __PT_GPRS + 88
__PT_R12 = __PT_GPRS + 96
__PT_R13 = __PT_GPRS + 104
__PT_R14 = __PT_GPRS + 112
__PT_R15 = __PT_GPRS + 120
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
# Kernel stack geometry. STACK_INIT is the offset (from the stack base) of
# the topmost usable frame: the very top of the stack is reserved for one
# initial stack frame plus an (empty) struct pt_regs.
STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
# Summary masks of the per-thread (TIF), per-cpu (CIF) and per-pt_regs (PIF)
# flag bits that require extra work on the return-to-user / syscall paths.
_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
	_TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
# TIF bits that force the slow (traced) syscall entry/exit path.
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
	_TIF_SYSCALL_TRACEPOINT)
_CIF_WORK = (_CIF_ASCE_PRIMARY | _CIF_ASCE_SECONDARY | _CIF_FPU)
_PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)

# Lowcore offset of the LPP save area, used by the "lpp" ALTERNATIVE in
# __switch_to (facility 40) below.
_LPP_OFFSET = __LC_LPP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
	#
	# Tell the irq-flags tracer that interrupts are about to be enabled.
	# basr %r2,%r0 branches nowhere; it only loads the address of the
	# next instruction into %r2, which trace_hardirqs_on_caller() takes
	# as the caller's address. Clobbers %r2 and %r14.
	#
	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_on_caller
#endif
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
	#
	# Tell the irq-flags tracer that interrupts have been disabled.
	# As in TRACE_IRQS_ON, basr %r2,%r0 just captures the current
	# address as the caller argument. Clobbers %r2 and %r14.
	#
	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_off_caller
#endif
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
	#
	# Notify lockdep when leaving the kernel, but only if we are really
	# returning to user space. The saved PSW in pt_regs (%r11) is tested
	# for the problem-state bit; "jz .+10" skips the 6-byte brasl (plus
	# its own 4 bytes) when returning to kernel mode.
	#
	.macro	LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10			# no -> skip the brasl below
	brasl	%r14,lockdep_sys_exit
#endif
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
	#
	# Stack overflow check (CONFIG_CHECK_STACK): if %r15 points into the
	# guard area at the bottom of the stack, branch to stack_overflow
	# with %r14 = lowcore offset of the save area holding the registers
	# from interrupt time. Note: lghi does not change the condition
	# code, so it may sit between the tml and the jz.
	#
	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
	#
	# Stack sanity check for CONFIG_VMAP_STACK: compute the STACK_INIT
	# point of whatever stack %r15 currently sits on and compare it
	# against each of the known per-cpu stacks. Branch to \oklabel if
	# %r15 is on one of them, otherwise go to stack_overflow with
	# %r14 = \savearea. Without VMAP_STACK the check degenerates to an
	# unconditional branch to \oklabel. Clobbers %r14.
	#
	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - STACK_SIZE	# round down to the stack base
	oill	%r14,STACK_INIT			# ... then up to its STACK_INIT
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
	#
	# Interrupt entry helper: switch to the async stack if necessary and
	# point %r11 at the pt_regs area. On entry %r8/%r9 hold the old PSW
	# (mask/address) and %r12 the current task. Three special cases:
	#  - interrupt from user space: account user time, then switch stacks
	#  - interrupt inside the SIE critical section (KVM): run .Lcleanup_sie
	#  - interrupt at .Lpsw_idle_exit: fold the idle period into the
	#    clock/timer accounting before continuing
	#
	.macro	SWITCH_ASYNC savearea,timer,clock
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	4f
#if IS_ENABLED(CONFIG_KVM)
	# Was the interrupt PSW address (%r9) inside [.Lsie_gmap,.Lsie_done)?
	lgr	%r14,%r9
	larl	%r13,.Lsie_gmap
	slgr	%r14,%r13
	lghi	%r13,.Lsie_done - .Lsie_gmap
	clgr	%r14,%r13
	jhe	0f
	lghi	%r11,\savearea	# inside critical section, do cleanup
	brasl	%r14,.Lcleanup_sie
#endif
0:	larl	%r13,.Lpsw_idle_exit	# interrupted psw_idle ?
	cgr	%r13,%r9
	jne	3f

	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	2f			# no SMT, skip mt_cycles calculation
	# NOTE(review): opcode 0xeb..17 looks like stcctm (store CPU counter
	# multiple) dumping the MT counter set to __SF_EMPTY+80 - confirm.
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
	larl	%r3,mt_cycles
	ag	%r3,__LC_PERCPU_OFFSET
	la	%r4,__SF_EMPTY+16(%r15)
	# mt_cycles[i] += counters at idle exit (+80) - at idle enter (+16)
1:	lg	%r0,0(%r3)
	slg	%r0,0(%r4)
	alg	%r0,64(%r4)
	stg	%r0,0(%r3)
	la	%r3,8(%r3)
	la	%r4,8(%r4)
	brct	%r1,1b

2:	mvc	__CLOCK_IDLE_EXIT(8,%r2), \clock
	mvc	__TIMER_IDLE_EXIT(8,%r2), \timer
	# account system time going idle
	ni	__LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT

	# steal_timer += clock at idle enter - last_update_clock
	lg	%r13,__LC_STEAL_TIMER
	alg	%r13,__CLOCK_IDLE_ENTER(%r2)
	slg	%r13,__LC_LAST_UPDATE_CLOCK
	stg	%r13,__LC_STEAL_TIMER

	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)

	# system_timer += last_update_timer - timer at idle enter
	lg	%r13,__LC_SYSTEM_TIMER
	alg	%r13,__LC_LAST_UPDATE_TIMER
	slg	%r13,__TIMER_IDLE_ENTER(%r2)
	stg	%r13,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)

	nihh	%r8,0xfcfd		# clear wait state and irq bits
3:	lg	%r14,__LC_ASYNC_STACK	# are we already on the target stack?
	slgr	%r14,%r15
	srag	%r14,%r14,STACK_SHIFT	# zero iff %r15 is within the async stack
	jnz	5f
	CHECK_STACK \savearea
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	6f
4:	UPDATE_VTIME %r14,%r15,\timer
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
5:	lg	%r15,__LC_ASYNC_STACK	# load async stack
6:	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# %r11 -> pt_regs
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
	#
	# Account elapsed user and system CPU time from the (down-counting)
	# CPU timer samples:
	#   user_timer   += __LC_EXIT_TIMER - \enter_timer   (time in user)
	#   system_timer += __LC_LAST_UPDATE_TIMER - __LC_EXIT_TIMER
	# and make \enter_timer the new last-update value.
	# \w1 and \w2 are caller-provided scratch registers.
	#
	.macro	UPDATE_VTIME w1,w2,enter_timer
	lg	\w1,__LC_EXIT_TIMER
	lg	\w2,__LC_LAST_UPDATE_TIMER
	slg	\w1,\enter_timer
	slg	\w2,__LC_EXIT_TIMER
	alg	\w1,__LC_USER_TIMER
	alg	\w2,__LC_SYSTEM_TIMER
	stg	\w1,__LC_USER_TIMER
	stg	\w2,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
	#
	# Restore the system mask from the old PSW (in %r8), but with the
	# PER mask bit (0x40 in the first PSW byte) forced off so that no
	# PER events are raised while still in the kernel.
	#
	.macro RESTORE_SM_CLEAR_PER
	stg	%r8,__LC_RETURN_PSW	# stash PSW mask half in lowcore
	ni	__LC_RETURN_PSW,0xbf	# clear PER bit in the mask byte
	ssm	__LC_RETURN_PSW		# reload system mask
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
	#
	# Enable interrupts: stosm ORs 0x03 into the PSW system mask,
	# setting the external- and I/O-interrupt mask bits. The old mask
	# is stored to the (unused) __SF_EMPTY slot of the stack frame.
	#
	.macro ENABLE_INTS
	stosm	__SF_EMPTY(%r15),3
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
	#
	# Enable interrupts with irq-flags tracing: the tracer is told first,
	# while interrupts are still disabled, then the mask is opened.
	#
	.macro ENABLE_INTS_TRACE
	TRACE_IRQS_ON
	ENABLE_INTS
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
	#
	# Disable interrupts: stnsm ANDs the PSW system mask with 0xfc,
	# clearing the external- and I/O-interrupt mask bits. The old mask
	# is stored to the __SF_EMPTY slot of the stack frame.
	#
	.macro DISABLE_INTS
	stnsm	__SF_EMPTY(%r15),0xfc
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
	#
	# Disable interrupts with irq-flags tracing: the mask is closed
	# first, then the tracer is informed (so the trace call itself runs
	# with interrupts already off).
	#
	.macro DISABLE_INTS_TRACE
	DISABLE_INTS
	TRACE_IRQS_OFF
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
	#
	# Store the TOD clock into \savearea. Machines with the z9-109
	# feature set use the faster stckf form; older machines fall back
	# to the serializing stck. Emitted via .insn so the file assembles
	# regardless of the assembler's -march level.
	#
	.macro STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	.insn	s,0xb27c0000,\savearea	# store clock fast
#else
	.insn	s,0xb2050000,\savearea	# store clock
#endif
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
/*
 * The TSTMSK macro generates a test-under-mask instruction by
 * calculating the memory offset for the specified mask value.
 * Mask value can be any constant. The macro shifts the mask
 * value to calculate the memory offset for the test-under-mask
 * instruction.
 */
	.macro	TSTMSK addr, mask, size=8, bytepos=0
	.if (\bytepos < \size) && (\mask >> 8)
	.if (\mask & 0xff)
	.error	"Mask exceeds byte boundary"
	.endif
	# Mask does not fit this byte: recurse with the mask shifted down
	# one byte and the byte position advanced, until it is 8 bits wide.
	TSTMSK	\addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
	.exitm
	.endif
	.ifeq \mask
	.error	"Mask must not be zero"
	.endif
	# Big-endian: byte \bytepos (counted from the least significant end)
	# lives at offset \size - \bytepos - 1 from the start of the field.
	off = \size - \bytepos - 1
	tm	off+\addr, \mask
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
	#
	# Disable branch prediction, patched in via ALTERNATIVE when
	# facility 82 (spectre mitigation) is installed; a no-op otherwise.
	# NOTE(review): 0xb2e8c000 appears to be a PPA (perform processor
	# assist) encoding emitted as raw bytes for old assemblers - confirm.
	#
	.macro BPOFF
	ALTERNATIVE "", ".long 0xb2e8c000", 82
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
	#
	# Re-enable branch prediction (counterpart of BPOFF), again only
	# patched in when facility 82 is installed.
	#
	.macro BPON
	ALTERNATIVE "", ".long 0xb2e8d000", 82
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
	#
	# On kernel entry: re-enable branch prediction unless the task has
	# one of the \tif_mask isolation bits set (then prediction stays
	# off for it). With facility 82 absent the whole sequence is
	# patched out. "jz .+8" skips the 4-byte .long plus itself.
	#
	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
		    "", 82
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
	#
	# On kernel exit: if a \tif_mask isolation bit is set, disable
	# branch prediction (0x..c000), otherwise enable it (0x..d000).
	# The TSTMSK runs unconditionally; only the conditional jump and
	# the prediction-control word are swapped by the ALTERNATIVE.
	#
	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8;  .long 0xb2e8c000", \
		    "jnz .+8; .long 0xb2e8d000", 82
	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
	# Expoline thunks (from <asm/nospec-insn.h>) for the indirect
	# branches used below via BR_EX %r9 / BR_EX %r14 / BASR_EX %r14,%r9.
	GEN_BR_THUNK %r9
	GEN_BR_THUNK %r14
	GEN_BR_THUNK %r14,%r11

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only in order to avoid that __switch_to starts at
	 * the beginning of the kprobes text section. In that case we would
	 * have several symbols at the same address. E.g. objdump would take
	 * an arbitrary symbol name when disassembling this code.
	 * With the added nop in between the __switch_to symbol is unique
	 * again.
	 */
	nop	0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
/*
 * Re-enable branch prediction for the current CPU (see BPON) and return.
 * Callable from C; takes no arguments, returns nothing.
 */
ENTRY(__bpon)
	.globl __bpon			# redundant with ENTRY, kept as-is
	BPON
	BR_EX	%r14
ENDPROC(__bpon)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 *
 * Saves prev's callee registers and kernel stack pointer, installs
 * next's task pointer, stack bound and pid in lowcore, then restores
 * next's registers. %r2 (prev) is deliberately left untouched so it
 * becomes the return value.
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack		# %r4 = offsetof(task, stack)
	lghi	%r1,__TASK_thread		# %r1 = offsetof(task, thread)
	llill	%r5,STACK_INIT			# %r5 = top-frame offset
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next (base 0 =
						# absolute lowcore address)
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	# lpp: reload the program parameter from lowcore (facility 40 only)
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	BR_EX	%r14
ENDPROC(__switch_to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)
#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 *
 * Enters SIE (guest execution) and returns the exit reason code in %r2
 * (0 for a regular interception, -EFAULT if a program check hit one of
 * the landing pads below). Interrupts that arrive inside the
 * .Lsie_gmap/.Lsie_done window are fixed up by SWITCH_ASYNC via
 * .Lcleanup_sie.
 */
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
	jno	.Lsie_load_guest_gprs
	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
.Lsie_load_guest_gprs:
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap			# no gmap -> keep current asce
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip			# ...requested immediate exit
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	# guest may run with branch prediction iff its isolation bits allow
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	# a fault on any rewind pad (or on sie_exit itself) is turned into
	# an -EFAULT exit reason via .Lsie_fault
	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) * SVC interrupt handler routine. System calls are synchronous events and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) * are entered with interrupts disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) ENTRY(system_call)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) stpt __LC_SYNC_ENTER_TIMER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) stmg %r8,%r15,__LC_SAVE_AREA_SYNC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) BPOFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) lg %r12,__LC_CURRENT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) lghi %r14,_PIF_SYSCALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) .Lsysc_per:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) lghi %r13,__TASK_thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) lg %r15,__LC_KERNEL_STACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) stmg %r0,%r7,__PT_R0(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) stg %r14,__PT_FLAGS(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) ENABLE_INTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) .Lsysc_do_svc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) # clear user controlled register to prevent speculative use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) xgr %r0,%r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) # load address of system call table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) lg %r10,__THREAD_sysc_table(%r13,%r12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) llgh %r8,__PT_INT_CODE+2(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) slag %r8,%r8,3 # shift and test for svc 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) jnz .Lsysc_nr_ok
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) # svc 0: system call number in %r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) llgfr %r1,%r1 # clear high word in r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) sth %r1,__PT_INT_CODE+2(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) cghi %r1,NR_syscalls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) jnl .Lsysc_nr_ok
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) slag %r8,%r1,3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) .Lsysc_nr_ok:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) stg %r2,__PT_ORIG_GPR2(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) stg %r7,STACK_FRAME_OVERHEAD(%r15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) lg %r9,0(%r8,%r10) # get system call add.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) TSTMSK __TI_flags(%r12),_TIF_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) jnz .Lsysc_tracesys
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) BASR_EX %r14,%r9 # call sys_xxxx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) stg %r2,__PT_R2(%r11) # store return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) .Lsysc_return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) #ifdef CONFIG_DEBUG_RSEQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) lgr %r2,%r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) brasl %r14,rseq_syscall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) LOCKDEP_SYS_EXIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) .Lsysc_tif:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) DISABLE_INTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) TSTMSK __PT_FLAGS(%r11),_PIF_WORK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) jnz .Lsysc_work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) TSTMSK __TI_flags(%r12),_TIF_WORK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) jnz .Lsysc_work # check for work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) TSTMSK __LC_CPU_FLAGS,(_CIF_WORK-_CIF_FPU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) jnz .Lsysc_work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) .Lsysc_restore:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) DISABLE_INTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) TSTMSK __LC_CPU_FLAGS, _CIF_FPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) jz .Lsysc_skip_fpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) brasl %r14,load_fpu_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) .Lsysc_skip_fpu:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) stpt __LC_EXIT_TIMER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) lmg %r0,%r15,__PT_R0(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) b __LC_RETURN_LPSWE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) # One of the work bits is on. Find out which one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) .Lsysc_work:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) ENABLE_INTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) jo .Lsysc_reschedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) jo .Lsysc_syscall_restart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) #ifdef CONFIG_UPROBES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) TSTMSK __TI_flags(%r12),_TIF_UPROBE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) jo .Lsysc_uprobe_notify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) jo .Lsysc_guarded_storage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) jo .Lsysc_singlestep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) #ifdef CONFIG_LIVEPATCH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) jo .Lsysc_patch_pending # handle live patching just before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) # signals and possible syscall restart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) jo .Lsysc_syscall_restart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) TSTMSK __TI_flags(%r12),_TIF_SIGPENDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) jo .Lsysc_sigpending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) jo .Lsysc_notify_resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) jnz .Lsysc_asce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) j .Lsysc_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) # _TIF_NEED_RESCHED is set, call schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) .Lsysc_reschedule:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) larl %r14,.Lsysc_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) jg schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) # _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) .Lsysc_asce:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) lctlg %c7,%c7,__LC_VDSO_ASCE # load secondary asce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) jz .Lsysc_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) #ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) tm __LC_STFLE_FAC_LIST+3,0x10 # has MVCOS ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) jnz .Lsysc_set_fs_fixup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) j .Lsysc_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) .Lsysc_set_fs_fixup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) larl %r14,.Lsysc_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) jg set_fs_fixup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) # _TIF_SIGPENDING is set, call do_signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) .Lsysc_sigpending:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) lgr %r2,%r11 # pass pointer to pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) brasl %r14,do_signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) jno .Lsysc_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) .Lsysc_do_syscall:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) lghi %r13,__TASK_thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) lghi %r1,0 # svc 0 returns -ENOSYS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) j .Lsysc_do_svc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) # _TIF_NOTIFY_RESUME is set, call do_notify_resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) .Lsysc_notify_resume:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) lgr %r2,%r11 # pass pointer to pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) larl %r14,.Lsysc_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) jg do_notify_resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) # _TIF_UPROBE is set, call uprobe_notify_resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) #ifdef CONFIG_UPROBES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) .Lsysc_uprobe_notify:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) lgr %r2,%r11 # pass pointer to pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) larl %r14,.Lsysc_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) jg uprobe_notify_resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) # _TIF_GUARDED_STORAGE is set, call guarded_storage_load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) .Lsysc_guarded_storage:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) lgr %r2,%r11 # pass pointer to pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) larl %r14,.Lsysc_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) jg gs_load_bc_cb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) # _TIF_PATCH_PENDING is set, call klp_update_patch_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) #ifdef CONFIG_LIVEPATCH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) .Lsysc_patch_pending:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) lg %r2,__LC_CURRENT # pass pointer to task struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) larl %r14,.Lsysc_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) jg klp_update_patch_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) # _PIF_PER_TRAP is set, call do_per_trap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) .Lsysc_singlestep:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) lgr %r2,%r11 # pass pointer to pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) larl %r14,.Lsysc_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) jg do_per_trap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) # _PIF_SYSCALL_RESTART is set, repeat the current system call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) .Lsysc_syscall_restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) ni __PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) lmg %r1,%r7,__PT_R1(%r11) # load svc arguments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) lg %r2,__PT_ORIG_GPR2(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) j .Lsysc_do_svc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) # and after the system call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) .Lsysc_tracesys:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) lgr %r2,%r11 # pass pointer to pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) la %r3,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) llgh %r0,__PT_INT_CODE+2(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) stg %r0,__PT_R2(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) brasl %r14,do_syscall_trace_enter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) lghi %r0,NR_syscalls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) clgr %r0,%r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) jnh .Lsysc_tracenogo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) sllg %r8,%r2,3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) lg %r9,0(%r8,%r10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) lmg %r3,%r7,__PT_R3(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) stg %r7,STACK_FRAME_OVERHEAD(%r15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) lg %r2,__PT_ORIG_GPR2(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) BASR_EX %r14,%r9 # call sys_xxx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) stg %r2,__PT_R2(%r11) # store return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) .Lsysc_tracenogo:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) TSTMSK __TI_flags(%r12),_TIF_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) jz .Lsysc_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) lgr %r2,%r11 # pass pointer to pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) larl %r14,.Lsysc_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) jg do_syscall_trace_exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) ENDPROC(system_call)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) # a new process exits the kernel with ret_from_fork
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) ENTRY(ret_from_fork)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) la %r11,STACK_FRAME_OVERHEAD(%r15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) lg %r12,__LC_CURRENT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) brasl %r14,schedule_tail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) jne .Lsysc_tracenogo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) # it's a kernel thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) lmg %r9,%r10,__PT_R9(%r11) # load gprs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) la %r2,0(%r10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) BASR_EX %r14,%r9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) j .Lsysc_tracenogo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) ENDPROC(ret_from_fork)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) ENTRY(kernel_thread_starter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) la %r2,0(%r10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) BASR_EX %r14,%r9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) j .Lsysc_tracenogo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) ENDPROC(kernel_thread_starter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) * Program check handler routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) ENTRY(pgm_check_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) stpt __LC_SYNC_ENTER_TIMER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) BPOFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) stmg %r8,%r15,__LC_SAVE_AREA_SYNC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) lg %r10,__LC_LAST_BREAK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) srag %r11,%r10,12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) jnz 0f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) /* if __LC_LAST_BREAK is < 4096, it contains one of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) * the lpswe addresses in lowcore. Set it to 1 (initial state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) * to prevent leaking that address to userspace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) lghi %r10,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 0: lg %r12,__LC_CURRENT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) lghi %r11,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) lmg %r8,%r9,__LC_PGM_OLD_PSW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) tmhh %r8,0x0001 # test problem state bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) jnz 3f # -> fault in user space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) #if IS_ENABLED(CONFIG_KVM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) # cleanup critical section for program checks in sie64a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) lgr %r14,%r9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) larl %r13,.Lsie_gmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) slgr %r14,%r13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) lghi %r13,.Lsie_done - .Lsie_gmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) clgr %r14,%r13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) jhe 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) larl %r9,sie_exit # skip forward to sie_exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) lghi %r11,_PIF_GUEST_FAULT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 1: tmhh %r8,0x4000 # PER bit set in old PSW ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) jnz 2f # -> enabled, can't be a double fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) tm __LC_PGM_ILC+3,0x80 # check for per exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) jnz .Lpgm_svcper # -> single stepped svc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 2: CHECK_STACK __LC_SAVE_AREA_SYNC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) # CHECK_VMAP_STACK branches to stack_overflow or 5f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 3: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) lg %r15,__LC_KERNEL_STACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) lgr %r14,%r12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) aghi %r14,__TASK_thread # pointer to thread_struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) lghi %r13,__LC_PGM_TDB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) tm __LC_PGM_ILC+2,0x02 # check for transaction abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) jz 4f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) mvc __THREAD_trap_tdb(256,%r14),0(%r13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 4: stg %r10,__THREAD_last_break(%r14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 5: lgr %r13,%r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) la %r11,STACK_FRAME_OVERHEAD(%r15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) stmg %r0,%r7,__PT_R0(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) # clear user controlled registers to prevent speculative use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) xgr %r0,%r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) xgr %r1,%r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) xgr %r2,%r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) xgr %r3,%r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) xgr %r4,%r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) xgr %r5,%r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) xgr %r6,%r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) xgr %r7,%r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) stmg %r8,%r9,__PT_PSW(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) stg %r13,__PT_FLAGS(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) stg %r10,__PT_ARGS(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) tm __LC_PGM_ILC+3,0x80 # check for per exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) jz 6f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) tmhh %r8,0x0001 # kernel per event ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) jz .Lpgm_kprobe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 6: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) RESTORE_SM_CLEAR_PER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) larl %r1,pgm_check_table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) llgh %r10,__PT_INT_CODE+2(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) nill %r10,0x007f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) sll %r10,3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) je .Lpgm_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) lg %r9,0(%r10,%r1) # load address of handler routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) lgr %r2,%r11 # pass pointer to pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) BASR_EX %r14,%r9 # branch to interrupt-handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) .Lpgm_return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) LOCKDEP_SYS_EXIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) tm __PT_PSW+1(%r11),0x01 # returning to user ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) jno .Lsysc_restore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) jo .Lsysc_do_syscall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) j .Lsysc_tif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) # PER event in supervisor state, must be kprobes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) .Lpgm_kprobe:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) RESTORE_SM_CLEAR_PER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) lgr %r2,%r11 # pass pointer to pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) brasl %r14,do_per_trap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) j .Lpgm_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) # single stepped system call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) .Lpgm_svcper:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) larl %r14,.Lsysc_per
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) stg %r14,__LC_RETURN_PSW+8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) lpswe __LC_RETURN_PSW # branch to .Lsysc_per
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) ENDPROC(pgm_check_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * IO interrupt handler routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) ENTRY(io_int_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) STCK __LC_INT_CLOCK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) stpt __LC_ASYNC_ENTER_TIMER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) BPOFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) lg %r12,__LC_CURRENT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) lmg %r8,%r9,__LC_IO_OLD_PSW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) stmg %r0,%r7,__PT_R0(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) # clear user controlled registers to prevent speculative use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) xgr %r0,%r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) xgr %r1,%r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) xgr %r2,%r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) xgr %r3,%r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) xgr %r4,%r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) xgr %r5,%r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) xgr %r6,%r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) xgr %r7,%r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) xgr %r10,%r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) stmg %r8,%r9,__PT_PSW(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) jo .Lio_restore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) TRACE_IRQS_OFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) .Lio_loop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) lgr %r2,%r11 # pass pointer to pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) lghi %r3,IO_INTERRUPT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) jz .Lio_call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) lghi %r3,THIN_INTERRUPT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) .Lio_call:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) brasl %r14,do_IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) jz .Lio_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) tpi 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) jz .Lio_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) j .Lio_loop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) .Lio_return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) LOCKDEP_SYS_EXIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) TSTMSK __TI_flags(%r12),_TIF_WORK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) jnz .Lio_work # there is work to do (signals etc.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) TSTMSK __LC_CPU_FLAGS,_CIF_WORK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) jnz .Lio_work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) .Lio_restore:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) TRACE_IRQS_ON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) tm __PT_PSW+1(%r11),0x01 # returning to user ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) jno .Lio_exit_kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) stpt __LC_EXIT_TIMER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) .Lio_exit_kernel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) lmg %r0,%r15,__PT_R0(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) b __LC_RETURN_LPSWE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) .Lio_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) # There is work todo, find out in which context we have been interrupted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) # 1) if we return to user space we can do all _TIF_WORK work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) # 2) if we return to kernel code and kvm is enabled check if we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) # modify the psw to leave SIE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) # 3) if we return to kernel code and preemptive scheduling is enabled check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) # the preemption counter and if it is zero call preempt_schedule_irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) # Before any work can be done, a switch to the kernel stack is required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) .Lio_work:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) tm __PT_PSW+1(%r11),0x01 # returning to user ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) jo .Lio_work_user # yes -> do resched & signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) #ifdef CONFIG_PREEMPTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) # check for preemptive scheduling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) icm %r0,15,__LC_PREEMPT_COUNT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) jnz .Lio_restore # preemption is disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) jno .Lio_restore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) # switch to kernel stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) lg %r1,__PT_R15(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) la %r11,STACK_FRAME_OVERHEAD(%r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) lgr %r15,%r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) brasl %r14,preempt_schedule_irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) j .Lio_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) j .Lio_restore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) # Need to do work before returning to userspace, switch to kernel stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) .Lio_work_user:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) lg %r1,__LC_KERNEL_STACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) la %r11,STACK_FRAME_OVERHEAD(%r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) lgr %r15,%r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) # One of the work bits is on. Find out which one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) jo .Lio_reschedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) #ifdef CONFIG_LIVEPATCH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) jo .Lio_patch_pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) TSTMSK __TI_flags(%r12),_TIF_SIGPENDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) jo .Lio_sigpending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) jo .Lio_notify_resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) jo .Lio_guarded_storage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) TSTMSK __LC_CPU_FLAGS,_CIF_FPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) jo .Lio_vxrs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) jnz .Lio_asce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) j .Lio_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) # _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) .Lio_asce:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) lctlg %c7,%c7,__LC_VDSO_ASCE # load secondary asce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) jz .Lio_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) #ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) tm __LC_STFLE_FAC_LIST+3,0x10 # has MVCOS ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) jnz .Lio_set_fs_fixup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) j .Lio_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) .Lio_set_fs_fixup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) larl %r14,.Lio_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) jg set_fs_fixup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) # CIF_FPU is set, restore floating-point controls and floating-point registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) .Lio_vxrs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) larl %r14,.Lio_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) jg load_fpu_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) # _TIF_GUARDED_STORAGE is set, call guarded_storage_load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) .Lio_guarded_storage:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) ENABLE_INTS_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) lgr %r2,%r11 # pass pointer to pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) brasl %r14,gs_load_bc_cb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) DISABLE_INTS_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) j .Lio_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) # _TIF_NEED_RESCHED is set, call schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) .Lio_reschedule:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) ENABLE_INTS_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) brasl %r14,schedule # call scheduler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) DISABLE_INTS_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) j .Lio_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) # _TIF_PATCH_PENDING is set, call klp_update_patch_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) #ifdef CONFIG_LIVEPATCH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) .Lio_patch_pending:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) lg %r2,__LC_CURRENT # pass pointer to task struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) larl %r14,.Lio_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) jg klp_update_patch_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) # _TIF_SIGPENDING or is set, call do_signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) .Lio_sigpending:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) ENABLE_INTS_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) lgr %r2,%r11 # pass pointer to pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) brasl %r14,do_signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) DISABLE_INTS_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) j .Lio_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) # _TIF_NOTIFY_RESUME or is set, call do_notify_resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) #
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) .Lio_notify_resume:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) ENABLE_INTS_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) lgr %r2,%r11 # pass pointer to pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) brasl %r14,do_notify_resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) DISABLE_INTS_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) j .Lio_return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) ENDPROC(io_int_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
/*
 * External interrupt handler routine.
 *
 * Entered via the external-interrupt new PSW.  The old PSW and the
 * interruption parameters are in the lowcore.  Saves the register state
 * into pt_regs and hands off to do_IRQ(regs, EXT_INTERRUPT); the exit
 * path (.Lio_return) is shared with io_int_handler.
 */
ENTRY(ext_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF					# branch-prediction mitigation off-ramp
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT		# %r12 = current task (convention)
	lmg	%r8,%r9,__LC_EXT_OLD_PSW	# %r8/%r9 = interrupted PSW
	# SWITCH_ASYNC selects/sets up the stack; afterwards %r11 is used as
	# the pt_regs pointer and %r15 as the kernel stack pointer.
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC	# copy saved %r8-%r15
	stmg	%r8,%r9,__PT_PSW(%r11)
	lghi	%r1,__LC_EXT_PARAMS2		# base register for the mvc below
	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)	# terminate backchain
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore			# irq suppressed: restore and leave
	TRACE_IRQS_OFF
	lgr	%r2,%r11			# pass pointer to pt_regs
	lghi	%r3,EXT_INTERRUPT
	brasl	%r14,do_IRQ
	j	.Lio_return			# shared exit path in io_int_handler
ENDPROC(ext_int_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
/*
 * Load idle PSW.
 *
 * Builds a 16-byte PSW at __SF_EMPTY(%r15): the mask half comes from %r3,
 * the address half is .Lpsw_idle_exit, then enters the wait state with
 * lpswe.  %r2 presumably points to the per-cpu idle data area (the
 * __CLOCK_IDLE_ENTER/__TIMER_IDLE_ENTER stores below) - TODO confirm
 * against the caller.
 */
ENTRY(psw_idle)
	stg	%r14,(__SF_GPRS+8*8)(%r15)	# save return address
	stg	%r3,__SF_EMPTY(%r15)		# PSW mask half of the idle PSW
	larl	%r1,.Lpsw_idle_exit
	stg	%r1,__SF_EMPTY+8(%r15)		# PSW address half: resume point
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm		# no SMT -> skip counter store
	# Raw opcode 0xeb...17 is STCCTM (store CPU counter multiple);
	# stores multithreading counters at __SF_EMPTY+16.
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT	# mark cpu as in enabled wait
	BPON
	STCK	__CLOCK_IDLE_ENTER(%r2)		# record idle-entry clock
	stpt	__TIMER_IDLE_ENTER(%r2)		# record idle-entry cpu timer
	lpswe	__SF_EMPTY(%r15)		# enter wait with the PSW built above
.Lpsw_idle_exit:
	BR_EX	%r14
ENDPROC(psw_idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
/*
 * Store floating-point controls and floating-point or vector registers
 * depending whether the vector facility is available. A critical section
 * cleanup assures that the registers are stored even if interrupted for
 * some other work. The CIF_FPU flag is set to trigger a lazy restore
 * of the register contents at return from io or a system call.
 *
 * Clobbers %r2 and %r3; runs with interrupts disabled (stnsm/ssm pair).
 */
ENTRY(save_fpu_regs)
	stnsm	__SF_EMPTY(%r15),0xfc	# disable interrupts, save old mask
	lg	%r2,__LC_CURRENT
	aghi	%r2,__TASK_thread	# %r2 = &current->thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsave_fpu_regs_exit	# already saved (lazy-restore pending)
	stfpc	__THREAD_FPU_fpc(%r2)	# store FP control register
	lg	%r3,__THREAD_FPU_regs(%r2)	# %r3 = register save area
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jz	.Lsave_fpu_regs_fp	  # no -> store FP regs
	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
	j	.Lsave_fpu_regs_done	  # -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
	# No vector facility: store the 16 classic FP registers.
	std	0,0(%r3)
	std	1,8(%r3)
	std	2,16(%r3)
	std	3,24(%r3)
	std	4,32(%r3)
	std	5,40(%r3)
	std	6,48(%r3)
	std	7,56(%r3)
	std	8,64(%r3)
	std	9,72(%r3)
	std	10,80(%r3)
	std	11,88(%r3)
	std	12,96(%r3)
	std	13,104(%r3)
	std	14,112(%r3)
	std	15,120(%r3)
.Lsave_fpu_regs_done:
	oi	__LC_CPU_FLAGS+7,_CIF_FPU	# request lazy restore on return
.Lsave_fpu_regs_exit:
	ssm	__SF_EMPTY(%r15)	# restore saved interrupt mask
	BR_EX	%r14
.Lsave_fpu_regs_end:
ENDPROC(save_fpu_regs)
EXPORT_SYMBOL(save_fpu_regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
/*
 * Load floating-point controls and floating-point or vector registers.
 * A critical section cleanup assures that the register contents are
 * loaded even if interrupted for some other work.
 *
 * There are special calling conventions to fit into sysc and io return work:
 * %r15: <kernel stack>
 * The function clobbers %r4 (used as scratch/save-area pointer) and clears
 * CIF_FPU once the registers have been restored.
 */
load_fpu_regs:
	stnsm	__SF_EMPTY(%r15),0xfc	# disable interrupts, save old mask
	lg	%r4,__LC_CURRENT
	aghi	%r4,__TASK_thread	# %r4 = &current->thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jno	.Lload_fpu_regs_exit	# nothing pending -> done
	lfpc	__THREAD_FPU_fpc(%r4)	# load FP control register
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	# Note: lg does not change the condition code, so the jz below still
	# tests the MACHINE_FLAG_VX result of the TSTMSK above.
	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
	VLM	%v0,%v15,0,%r4
	VLM	%v16,%v31,256,%r4
	j	.Lload_fpu_regs_done
.Lload_fpu_regs_fp:
	# No vector facility: load the 16 classic FP registers.
	ld	0,0(%r4)
	ld	1,8(%r4)
	ld	2,16(%r4)
	ld	3,24(%r4)
	ld	4,32(%r4)
	ld	5,40(%r4)
	ld	6,48(%r4)
	ld	7,56(%r4)
	ld	8,64(%r4)
	ld	9,72(%r4)
	ld	10,80(%r4)
	ld	11,88(%r4)
	ld	12,96(%r4)
	ld	13,104(%r4)
	ld	14,112(%r4)
	ld	15,120(%r4)
.Lload_fpu_regs_done:
	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU	# lazy restore complete
.Lload_fpu_regs_exit:
	ssm	__SF_EMPTY(%r15)	# restore saved interrupt mask
	BR_EX	%r14
.Lload_fpu_regs_end:
ENDPROC(load_fpu_regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
/*
 * Machine check handler routines.
 *
 * A machine check may leave registers, timers and control registers in an
 * unpredictable state.  The entry code revalidates each piece of state
 * from the lowcore save areas, panics (.Lmcck_panic) when the machine
 * check code says critical state is invalid, and otherwise calls
 * s390_do_machine_check()/s390_handle_mcck().
 */
ENTRY(mcck_int_handler)
	STCK	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	sckc	__LC_CLOCK_COMPARATOR	# validate comparator
	lam	%a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1)	# validate acrs
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	la	%r14,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	ptlb				# flush TLB after ctl reg reload
	lg	%r11,__LC_MCESAD-4095(%r14) # extended machine check save area
	nill	%r11,0xfc00		# MCESA_ORIGIN_MASK
	# Restore guarded storage controls only if CR2 enables GS and the
	# machine check says the GS save area is valid.
	TSTMSK	__LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
	jno	0f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_GS_VALID
	jno	0f
	.insn	 rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
	# Revalidate the FP control register: use the saved value if valid,
	# otherwise fall back to zero.
0:	l	%r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_FC_VALID
	jo	0f
	sr	%r14,%r14
0:	sfpc	%r14
	# Revalidate FP/vector registers: from the lowcore FP save area
	# without the vector facility, from the MCESA with it.
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jo	0f
	lghi	%r14,__LC_FPREGS_SAVE_AREA
	ld	%f0,0(%r14)
	ld	%f1,8(%r14)
	ld	%f2,16(%r14)
	ld	%f3,24(%r14)
	ld	%f4,32(%r14)
	ld	%f5,40(%r14)
	ld	%f6,48(%r14)
	ld	%f7,56(%r14)
	ld	%f8,64(%r14)
	ld	%f9,72(%r14)
	ld	%f10,80(%r14)
	ld	%f11,88(%r14)
	ld	%f12,96(%r14)
	ld	%f13,104(%r14)
	ld	%f14,112(%r14)
	ld	%f15,120(%r14)
	j	1f
0:	VLM	%v0,%v15,0,%r11
	VLM	%v16,%v31,256,%r11
	# Revalidate the CPU timer.  If the saved timer is invalid, pick the
	# most recent of sync/async enter, exit and last-update timers as a
	# best-effort replacement before reprogramming it with spt.
1:	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f			# saved timer valid -> keep it
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	jl	0f
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	# The old PSW must have valid mask bits; a kernel-mode PSW must also
	# have a valid instruction address, otherwise we cannot return.
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	4f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
4:	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER,__LC_MCCK_CLOCK
.Lmcck_skip:
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64	# saved %r8-%r15
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	cghi	%r2,0			# further handling requested ?
	je	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	TRACE_IRQS_OFF
	brasl	%r14,s390_handle_mcck
	TRACE_IRQS_ON
.Lmcck_return:
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
0:	lmg	%r11,%r15,__PT_R11(%r11)
	b	__LC_RETURN_MCCK_LPSWE	# lowcore stub does the final lpswe

.Lmcck_panic:
	# Machine check state unusable: continue on the nodat stack so that
	# at least a panic with backtrace is possible.
	lg	%r15,__LC_NODAT_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	j	.Lmcck_skip
ENDPROC(mcck_int_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
#
# PSW restart interrupt handler
#
# Builds a pt_regs frame on the restart stack, optionally waits for the
# requesting (source) cpu to store its status, then calls the function from
# __LC_RESTART_FN.  If that function returns, the cpu stops itself via SIGP.
#
ENTRY(restart_int_handler)
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40	# LPP if facility 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)	# clear stack frame
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lg	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b				# retry while busy
3:	j	3b				# should not be reached
ENDPROC(restart_int_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) .section .kprobes.text, "ax"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) #if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) * The synchronous or the asynchronous stack overflowed. We are dead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) * No need to properly save the registers, we are going to panic anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) * Setup a pt_regs so that show_trace can provide a good call trace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) */
ENTRY(stack_overflow)
	# Fatal path: build a minimal pt_regs on the nodat stack so that
	# kernel_stack_overflow() can print a usable backtrace.
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	# NOTE(review): %r14 is not set in this block - it is expected to
	# point at the %r8-%r15 save area prepared by the caller; confirm
	# against the stack-check macro that branches here.
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)	# terminate backchain
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow	# tail call, never returns
ENDPROC(stack_overflow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) #if IS_ENABLED(CONFIG_KVM)
# Cleanup helper for an interrupt/machine check that hit while (about to be)
# in SIE guest context.  Marks a machine check that landed inside the
# .Lsie_entry..Lsie_skip window as a guest mcck, leaves SIE mode and
# redirects execution to sie_exit.
.Lcleanup_sie:
	cghi	%r11,__LC_SAVE_AREA_ASYNC	#Is this in normal interrupt?
	je	1f
	# %r9 = interrupted address; check whether it lies within the
	# critical [.Lsie_entry, .Lsie_skip] range.
	larl	%r13,.Lsie_entry
	slgr	%r9,%r13
	lghi	%r13,.Lsie_skip - .Lsie_entry
	clgr	%r9,%r13
	jh	1f
	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST	# mcck hit the guest
1:	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	BR_EX	%r14,%r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) #endif
	.section .rodata, "a"
/*
 * 64-bit system call table: asm/syscall_table.h contains one SYSCALL()
 * line per syscall; each expands to a .quad pointing at the native
 * __s390x_ entry point.
 */
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL

#ifdef CONFIG_COMPAT

/*
 * 31-bit compat table: same include file, but SYSCALL() now expands to
 * the __s390_ compat (emu) entry points.
 */
#define SYSCALL(esame,emu)	.quad __s390_ ## emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif