// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/err.h>
#include <asm/asm-prototypes.h>
#include <asm/kup.h>
#include <asm/cputime.h>
#include <asm/hw_irq.h>
#include <asm/kprobes.h>
#include <asm/paca.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm/syscall.h>
#include <asm/time.h>
#include <asm/unistd.h>

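/*
 * Entries in the syscall tables have a variety of prototypes; they are
 * all invoked through this generic six-long-argument signature, with any
 * unused argument registers simply ignored by the callee.
 */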
typedef long (*syscall_fn)(long, long, long, long, long, long);

/* Has to run notrace because it is entered before the irq state is completely "reconciled" */
notrace long system_call_exception(long r3, long r4, long r5,
				   long r6, long r7, long r8,
				   unsigned long r0, struct pt_regs *regs)
{
	syscall_fn f;

	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);

	trace_hardirqs_off(); /* finish reconciling */

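	/*
	 * Sanity checks on entry: the interrupt must be recoverable
	 * (MSR_RI), must have come from user mode (MSR_PR), the full
	 * register set must have been saved, and soft interrupts must
	 * have been enabled in the interrupted context.
	 */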
	if (IS_ENABLED(CONFIG_PPC_BOOK3S))
		BUG_ON(!(regs->msr & MSR_RI));
	BUG_ON(!(regs->msr & MSR_PR));
	BUG_ON(!FULL_REGS(regs));
	BUG_ON(regs->softe != IRQS_ENABLED);

	kuap_check_amr();

	account_cpu_user_entry();

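	/*
	 * On shared-processor LPARs, compare our dispatch trace log index
	 * with the hypervisor's to detect time stolen from this CPU while
	 * it was not dispatched, and account it before it accumulates.
	 */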
#ifdef CONFIG_PPC_SPLPAR
	if (IS_ENABLED(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) &&
	    firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct lppaca *lp = local_paca->lppaca_ptr;

		if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx)))
			accumulate_stolen_time();
	}
#endif

	/*
	 * This is not required for the syscall exit path, but makes the
	 * stack frame look nicer. If this were initialised in the first
	 * stack frame, or if the unwinder were taught that the first stack
	 * frame always returns to user with IRQS_ENABLED, this store could
	 * be avoided!
	 */
	regs->softe = IRQS_ENABLED;

	local_irq_enable();

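	/*
	 * _TIF_SYSCALL_DOTRACE groups the syscall-entry work flags
	 * (ptrace, seccomp, audit, syscall tracepoints and similar), so a
	 * single test covers the common no-tracing fast path.
	 */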
	if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
		if (unlikely(regs->trap == 0x7ff0)) {
			/* Unsupported scv vector */
			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
			return regs->gpr[3];
		}
		/*
		 * We use the return value of do_syscall_trace_enter() as the
		 * syscall number. If the syscall was rejected for any reason,
		 * do_syscall_trace_enter() returns an invalid syscall number,
		 * so the test against NR_syscalls below fails and the return
		 * value already placed in regs->gpr[3] is used instead.
		 */
		r0 = do_syscall_trace_enter(regs);
		if (unlikely(r0 >= NR_syscalls))
			return regs->gpr[3];
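		/*
		 * The tracer may have modified the saved register state, so
		 * reload the syscall arguments from the pt_regs frame.
		 */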
		r3 = regs->gpr[3];
		r4 = regs->gpr[4];
		r5 = regs->gpr[5];
		r6 = regs->gpr[6];
		r7 = regs->gpr[7];
		r8 = regs->gpr[8];

	} else if (unlikely(r0 >= NR_syscalls)) {
		if (unlikely(regs->trap == 0x7ff0)) {
			/* Unsupported scv vector */
			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
			return regs->gpr[3];
		}
		return -ENOSYS;
	}

	/*
	 * Prevent a mispredicted bounds check from speculatively indexing
	 * the syscall table with an out-of-range r0 (Spectre v1). Using
	 * array_index_nospec() may be faster than this barrier.
	 */
	barrier_nospec();

	if (unlikely(is_32bit_task())) {
		f = (void *)compat_sys_call_table[r0];

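		/*
		 * The compat ABI only defines the low 32 bits of each
		 * argument register; clear the high bits so compat syscalls
		 * never see stale upper halves.
		 */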
		r3 &= 0x00000000ffffffffULL;
		r4 &= 0x00000000ffffffffULL;
		r5 &= 0x00000000ffffffffULL;
		r6 &= 0x00000000ffffffffULL;
		r7 &= 0x00000000ffffffffULL;
		r8 &= 0x00000000ffffffffULL;

	} else {
		f = (void *)sys_call_table[r0];
	}

	return f(r3, r4, r5, r6, r7, r8);
}

/*
 * Local irqs must be disabled on entry. Returns false if the caller must
 * re-enable them, check for new work, and try again.
 */
static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri)
{
	/* This must be done with RI=1 because tracing may touch vmaps */
	trace_hardirqs_on();

	/* This pattern matches prep_irq_for_idle */
	if (clear_ri)
		__hard_EE_RI_disable();
	else
		__hard_irq_disable();
	if (unlikely(lazy_irq_pending_nocheck())) {
		/* Took an interrupt, may have more exit work to do. */
		if (clear_ri)
			__hard_RI_enable();
		trace_hardirqs_off();
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

		return false;
	}
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);

	return true;
}

/*
 * This should be called after a syscall returns, with r3 the return value
 * from the syscall. If this function returns non-zero, the system call
 * exit assembly should additionally load all GPRs, as well as CTR and XER,
 * from the interrupt frame.
 *
 * The function graph tracer cannot trace the return side of this function,
 * because RI=0 and the soft mask state is "unreconciled", so it is marked
 * notrace.
 */
notrace unsigned long syscall_exit_prepare(unsigned long r3,
					   struct pt_regs *regs,
					   long scv)
{
	unsigned long *ti_flagsp = &current_thread_info()->flags;
	unsigned long ti_flags;
	unsigned long ret = 0;

	kuap_check_amr();

	regs->result = r3;

	/* Check whether the syscall is issued inside a restartable sequence */
	rseq_syscall(regs);

	ti_flags = *ti_flagsp;

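	/*
	 * For the sc ABI, an error is reported by setting CR0[SO] and
	 * returning the errno as a positive value; the scv ABI instead
	 * returns negative errnos directly, so no translation is needed.
	 */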
	if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && !scv) {
		if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
			r3 = -r3;
			regs->ccr |= 0x10000000; /* Set SO bit in CR */
		}
	}

	if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
		if (ti_flags & _TIF_RESTOREALL)
			ret = _TIF_RESTOREALL;
		else
			regs->gpr[3] = r3;
		clear_bits(_TIF_PERSYSCALL_MASK, ti_flagsp);
	} else {
		regs->gpr[3] = r3;
	}

	if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
		do_syscall_trace_leave(regs);
		ret |= _TIF_RESTOREALL;
	}

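	/*
	 * Exit work must be checked with local irqs disabled; if new work
	 * is flagged while handling the previous item, the flags are
	 * re-read and the loop runs again so nothing is lost on the way
	 * out to userspace.
	 */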
again:
	local_irq_disable();
	ti_flags = READ_ONCE(*ti_flagsp);
	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
		local_irq_enable();
		if (ti_flags & _TIF_NEED_RESCHED) {
			schedule();
		} else {
			/*
			 * SIGPENDING must restore signal handler function
			 * argument GPRs, and some non-volatiles (e.g., r1).
			 * Restore all for now. This could be made lighter.
			 */
			if (ti_flags & _TIF_SIGPENDING)
				ret |= _TIF_RESTOREALL;
			do_notify_resume(regs, ti_flags);
		}
		local_irq_disable();
		ti_flags = READ_ONCE(*ti_flagsp);
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
				unlikely((ti_flags & _TIF_RESTORE_TM))) {
			restore_tm_state(regs);
		} else {
			unsigned long mathflags = MSR_FP;

			if (cpu_has_feature(CPU_FTR_VSX))
				mathflags |= MSR_VEC | MSR_VSX;
			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
				mathflags |= MSR_VEC;

			/*
			 * If userspace MSR has all available FP bits set,
			 * then they are live and no need to restore. If not,
			 * it means the regs were given up and restore_math
			 * may decide to restore them (to avoid taking an FP
			 * fault).
			 */
			if ((regs->msr & mathflags) != mathflags)
				restore_math(regs);
		}
	}

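	/*
	 * Once SRR0/SRR1 hold the return context, another interrupt would
	 * clobber them, so the sc exit clears MSR_RI to flag that window
	 * as unrecoverable.
	 */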
	/* scv need not set RI=0 because SRRs are not used */
	if (unlikely(!prep_irq_for_enabled_exit(!scv))) {
		local_irq_enable();
		goto again;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	account_cpu_user_exit();

	return ret;
}

#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */
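/*
 * Called on return to user mode from an interrupt. As with syscall exit,
 * a non-zero return value tells the assembly exit path to restore the
 * full register set from the interrupt frame.
 */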
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
{
#ifdef CONFIG_PPC_BOOK3E
	struct thread_struct *ts = &current->thread;
#endif
	unsigned long *ti_flagsp = &current_thread_info()->flags;
	unsigned long ti_flags;
	unsigned long flags;
	unsigned long ret = 0;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S))
		BUG_ON(!(regs->msr & MSR_RI));
	BUG_ON(!(regs->msr & MSR_PR));
	BUG_ON(!FULL_REGS(regs));
	BUG_ON(regs->softe != IRQS_ENABLED);

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * AMR can only have been unlocked if we interrupted the kernel.
	 */
	kuap_check_amr();

	local_irq_save(flags);

again:
	ti_flags = READ_ONCE(*ti_flagsp);
	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
		local_irq_enable(); /* returning to user: may enable */
		if (ti_flags & _TIF_NEED_RESCHED) {
			schedule();
		} else {
			if (ti_flags & _TIF_SIGPENDING)
				ret |= _TIF_RESTOREALL;
			do_notify_resume(regs, ti_flags);
		}
		local_irq_disable();
		ti_flags = READ_ONCE(*ti_flagsp);
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
				unlikely((ti_flags & _TIF_RESTORE_TM))) {
			restore_tm_state(regs);
		} else {
			unsigned long mathflags = MSR_FP;

			if (cpu_has_feature(CPU_FTR_VSX))
				mathflags |= MSR_VEC | MSR_VSX;
			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
				mathflags |= MSR_VEC;

			/* See above restore_math comment */
			if ((regs->msr & mathflags) != mathflags)
				restore_math(regs);
		}
	}

	if (unlikely(!prep_irq_for_enabled_exit(true))) {
		local_irq_enable();
		local_irq_disable();
		goto again;
	}

#ifdef CONFIG_PPC_BOOK3E
	if (unlikely(ts->debug.dbcr0 & DBCR0_IDM)) {
		/*
		 * Check to see if the dbcr0 register is set up to debug.
		 * Use the internal debug mode bit to do this.
		 */
		mtmsr(mfmsr() & ~MSR_DE);
		mtspr(SPRN_DBCR0, ts->debug.dbcr0);
		mtspr(SPRN_DBSR, -1);
	}
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	account_cpu_user_exit();

	return ret;
}

void unrecoverable_exception(struct pt_regs *regs);
void preempt_schedule_irq(void);

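/*
 * Called on return from an interrupt taken in kernel mode. A non-zero
 * return value tells the assembly exit path that an emulated stack
 * store (_TIF_EMULATE_STACK_STORE) is pending.
 */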
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)
{
	unsigned long *ti_flagsp = &current_thread_info()->flags;
	unsigned long flags;
	unsigned long ret = 0;
	unsigned long amr;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && unlikely(!(regs->msr & MSR_RI)))
		unrecoverable_exception(regs);
	BUG_ON(regs->msr & MSR_PR);
	BUG_ON(!FULL_REGS(regs));

	amr = kuap_get_and_check_amr();

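	/*
	 * A pending emulated store to this interrupt's own stack frame
	 * cannot be performed here without being overwritten by the
	 * register restore, so it is left to the assembly exit path,
	 * signalled by the non-zero return value.
	 */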
	if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
		clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
		ret = 1;
	}

	local_irq_save(flags);

	if (regs->softe == IRQS_ENABLED) {
		/* Returning to a kernel context with local irqs enabled. */
		WARN_ON_ONCE(!(regs->msr & MSR_EE));
again:
		if (IS_ENABLED(CONFIG_PREEMPT)) {
			/* Return to preemptible kernel context */
			if (unlikely(*ti_flagsp & _TIF_NEED_RESCHED)) {
				if (preempt_count() == 0)
					preempt_schedule_irq();
			}
		}

		if (unlikely(!prep_irq_for_enabled_exit(true))) {
			/*
			 * Can't local_irq_restore to replay if we were in
			 * interrupt context. Must replay directly.
			 */
			if (irqs_disabled_flags(flags)) {
				replay_soft_interrupts();
			} else {
				local_irq_restore(flags);
				local_irq_save(flags);
			}
			/* Took an interrupt, may have more exit work to do. */
			goto again;
		}
	} else {
		/* Returning to a kernel context with local irqs disabled. */
		__hard_EE_RI_disable();
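		/*
		 * The interrupted context ran with MSR_EE set (it was only
		 * soft-disabled), so no hard disable should remain recorded
		 * once we return to it.
		 */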
		if (regs->msr & MSR_EE)
			local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	/*
	 * Don't want to mfspr(SPRN_AMR) here, because this comes after mtmsr,
	 * which would cause Read-After-Write stalls. Hence, we take the AMR
	 * value from the check above.
	 */
	kuap_restore_amr(regs, amr);

	return ret;
}
#endif