// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/kernel/kprobes.c
 *
 * Copyright (C) 2004 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <linux/context_tracking.h>
#include <asm/signal.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>

/* We do not have hardware single-stepping on sparc64.
 * So we implement software single-stepping with breakpoint
 * traps.  The top-level scheme is similar to that used
 * in the x86 kprobes implementation.
 *
 * In the kprobe->ainsn.insn[] array we store the original
 * instruction at index zero and a break instruction at
 * index one.
 *
 * When we hit a kprobe we:
 * - Run the pre-handler
 * - Remember "regs->tnpc" and the interrupt level stored in
 *   "regs->tstate" so we can restore them later
 * - Disable PIL interrupts
 * - Set regs->tpc to point to kprobe->ainsn.insn[0]
 * - Set regs->tnpc to point to kprobe->ainsn.insn[1]
 * - Mark that we are actively in a kprobe
 *
 * At this point we wait for the second breakpoint at
 * kprobe->ainsn.insn[1] to hit.  When it does we:
 * - Run the post-handler
 * - Set regs->tpc to the "remembered" regs->tnpc stored above,
 *   and restore the PIL interrupt level in "regs->tstate" as well
 * - Make any adjustments necessary to regs->tnpc in order
 *   to handle relative branches correctly.  See below.
 * - Mark that we are no longer actively in a kprobe.
 */
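
/* A minimal usage sketch of the scheme above, for orientation only.
 * Nothing here is part of the sparc64 port itself: the probed symbol
 * name and the handler body are hypothetical, and a real client like
 * this would live in its own module and call register_kprobe() /
 * unregister_kprobe().
 *
 *	static int example_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit %pS: tpc=%lx tnpc=%lx\n",
 *			(void *) regs->tpc, regs->tpc, regs->tnpc);
 *		return 0;	// let the single-step machinery run
 *	}
 *
 *	static struct kprobe example_kp = {
 *		.symbol_name	= "kernel_clone",	// hypothetical target
 *		.pre_handler	= example_pre,
 *	};
 */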

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	if ((unsigned long) p->addr & 0x3UL)
		return -EILSEQ;

	p->ainsn.insn[0] = *p->addr;
	flushi(&p->ainsn.insn[0]);

	p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
	flushi(&p->ainsn.insn[1]);

	p->opcode = *p->addr;
	return 0;
}
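
/* For reference, after arch_prepare_kprobe() the per-probe slot holds:
 *
 *	ainsn.insn[0]	original instruction copied from p->addr
 *	ainsn.insn[1]	BREAKPOINT_INSTRUCTION_2 ("ta 0x71")
 *
 * so single-stepping the copy at insn[0] traps straight back into the
 * kprobes code via insn[1].  (The "ta 0x71" reading of the constant is
 * an assumption based on the trap numbers checked in kprobe_trap().)
 */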

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flushi(p->addr);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flushi(p->addr);
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.orig_tnpc = kcb->kprobe_orig_tnpc;
	kcb->prev_kprobe.orig_tstate_pil = kcb->kprobe_orig_tstate_pil;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc;
	kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
					 struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_orig_tnpc = regs->tnpc;
	kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
					 struct kprobe_ctlblk *kcb)
{
	regs->tstate |= TSTATE_PIL;

	/* Single step inline if the probed instruction is itself a breakpoint. */
	if (p->opcode == BREAKPOINT_INSTRUCTION) {
		regs->tpc = (unsigned long) p->addr;
		regs->tnpc = kcb->kprobe_orig_tnpc;
	} else {
		regs->tpc = (unsigned long) &p->ainsn.insn[0];
		regs->tnpc = (unsigned long) &p->ainsn.insn[1];
	}
}

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	void *addr = (void *) regs->tpc;
	int ret = 0;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS) {
				regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
					kcb->kprobe_orig_tstate_pil);
				goto no_kprobe;
			}
			/* We have reentered kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * Here we save the original kprobe variables and
			 * just single step on the instruction of the new
			 * probe without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			kcb->kprobe_status = KPROBE_REENTER;
			prepare_singlestep(p, regs, kcb);
			return 1;
		} else if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
			/* The breakpoint instruction was removed by
			 * another cpu right after we hit it; no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let the kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	if (p->pre_handler && p->pre_handler(p, regs)) {
		reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}

	prepare_singlestep(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
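
/* For readers following the control flow, the kprobe_status values used
 * above move roughly as follows on this architecture (a summary of the
 * code in this file, not an additional state machine):
 *
 *	breakpoint hit		-> KPROBE_HIT_ACTIVE (pre-handler runs)
 *	single-step armed	-> KPROBE_HIT_SS
 *	nested probe hit	-> KPROBE_REENTER (no user handlers)
 *	second breakpoint	-> KPROBE_HIT_SSDONE (post-handler runs;
 *				   see post_kprobe_handler() below)
 */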

/* If INSN is a relative control transfer instruction,
 * return the corrected branch destination value.
 *
 * regs->tpc and regs->tnpc still hold the values of the
 * program counters at the time of the trap due to the execution
 * of the BREAKPOINT_INSTRUCTION_2 at p->ainsn.insn[1].
 */
static unsigned long __kprobes relbranch_fixup(u32 insn, struct kprobe *p,
					       struct pt_regs *regs)
{
	unsigned long real_pc = (unsigned long) p->addr;

	/* Branch not taken, no mods necessary. */
	if (regs->tnpc == regs->tpc + 0x4UL)
		return real_pc + 0x8UL;

	/* The three cases are call, branch w/prediction,
	 * and traditional branch.
	 */
	if ((insn & 0xc0000000) == 0x40000000 ||
	    (insn & 0xc1c00000) == 0x00400000 ||
	    (insn & 0xc1c00000) == 0x00800000) {
		unsigned long ainsn_addr;

		ainsn_addr = (unsigned long) &p->ainsn.insn[0];

		/* The instruction did all the work for us
		 * already, just apply the offset to the correct
		 * instruction location.
		 */
		return (real_pc + (regs->tnpc - ainsn_addr));
	}

	/* It is jmpl or some other absolute PC modification instruction,
	 * leave NPC as-is.
	 */
	return regs->tnpc;
}
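
/* The opcode masks tested in relbranch_fixup() can be hard to read at a
 * glance.  The helper below is a purely illustrative sketch that spells
 * out the same three checks field by field, assuming the SPARC V9 layout
 * with "op" in bits 31:30 and "op2" in bits 24:22; it is not used by the
 * kprobes machinery itself.
 */
static inline bool __maybe_unused kprobe_insn_is_pc_relative_cti(u32 insn)
{
	u32 op = insn >> 30;			/* instruction format */
	u32 op2 = (insn >> 22) & 0x7;		/* sub-format when op == 0 */

	if (op == 1)				/* call: always PC-relative */
		return true;
	if (op == 0 && (op2 == 1 || op2 == 2))	/* BPcc / Bicc branches */
		return true;
	return false;
}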

/* If INSN is an instruction which writes its PC location
 * into a destination register, fix that up.
 */
static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
				  unsigned long real_pc)
{
	unsigned long *slot = NULL;

	/* Simplest case is 'call', which always uses %o7 */
	if ((insn & 0xc0000000) == 0x40000000) {
		slot = &regs->u_regs[UREG_I7];
	}

	/* 'jmpl' encodes the register inside of the opcode */
	if ((insn & 0xc1f80000) == 0x81c00000) {
		unsigned long rd = ((insn >> 25) & 0x1f);

		if (rd <= 15) {
			slot = &regs->u_regs[rd];
		} else {
			/* Hard case, it goes onto the stack. */
			flushw_all();

			rd -= 16;
			slot = (unsigned long *)
				(regs->u_regs[UREG_FP] + STACK_BIAS);
			slot += rd;
		}
	}
	if (slot != NULL)
		*slot = real_pc;
}
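
/* Illustration only: extracting the rd field that retpc_fixup() cares
 * about, assuming the V9 format-3 layout with rd in bits 29:25.  For an
 * indirect call such as "jmpl %g1, %o7", rd decodes to 15 (%o7), which
 * lives in regs->u_regs[]; for rd >= 16 the destination is a windowed
 * register that has been spilled to the stack frame instead.  This
 * helper is a sketch and is not used by the code above.
 */
static inline unsigned int __maybe_unused kprobe_insn_rd(u32 insn)
{
	return (insn >> 25) & 0x1f;	/* destination register number */
}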

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction which has been replaced by the breakpoint
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is &p->ainsn.insn[0].
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	u32 insn = p->ainsn.insn[0];

	regs->tnpc = relbranch_fixup(insn, p, regs);

	/* This assignment must occur after relbranch_fixup() */
	regs->tpc = kcb->kprobe_orig_tnpc;

	retpc_fixup(regs, insn, (unsigned long) p->addr);

	regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
			kcb->kprobe_orig_tstate_pil);
}
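
/* A worked example of the fixups above, using made-up addresses: probe a
 * taken "ba" at p->addr whose target is p->addr + 0x40.  Single-stepping
 * the copy leaves regs->tnpc pointing 0x40 past ainsn.insn[0], so
 * relbranch_fixup() rebases it to p->addr + 0x40, while regs->tpc is set
 * to the saved original tnpc (p->addr + 4, the delay slot).  Execution
 * therefore resumes at the delay slot and then continues at the branch
 * target, exactly as if the branch had run in place.
 */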

static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);

	/* Restore the original saved kprobe variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault.  We reset the current
		 * kprobe, point tpc back to the probe address, and
		 * allow the page fault handler to continue as for a
		 * normal page fault.
		 */
		regs->tpc = (unsigned long)cur->addr;
		regs->tnpc = kcb->kprobe_orig_tnpc;
		regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
				kcb->kprobe_orig_tstate_pil);
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could also be used to
		 * account for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault.  This could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc.  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * so let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}

	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_DEBUG:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG_2:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
				      struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	BUG_ON(trap_level != 0x170 && trap_level != 0x171);

	if (user_mode(regs)) {
		local_irq_enable();
		bad_trap(regs, trap_level);
		goto out;
	}

	/* Software trap N is delivered as trap type 0x100 + N, so:
	 *
	 * trap_level == 0x170 --> ta 0x70
	 * trap_level == 0x171 --> ta 0x71
	 */
	if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2,
		       (trap_level == 0x170) ? "debug" : "debug_2",
		       regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
		bad_trap(regs, trap_level);
out:
	exception_exit(prev_state);
}

/* The value stored in the return address register is actually 2
 * instructions before where the callee will return to.
 * Sequences usually look something like this:
 *
 *	call	some_function	<--- return register points here
 *	 nop			<--- call delay slot
 *	whatever		<--- where callee returns to
 *
 * To keep trampoline_probe_handler logic simpler, we normalize the
 * value kept in ri->ret_addr so we don't need to keep adjusting it
 * back and forth.
 */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8);
	ri->fp = NULL;

	/* Replace the return addr with trampoline addr */
	regs->u_regs[UREG_RETPC] =
		((unsigned long)kretprobe_trampoline) - 8;
}
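
/* Worked example of the +/- 8 normalisation above, with a made-up
 * address: if the "call some_function" sits at 0x1000, the call writes
 * 0x1000 into %o7 and the callee really returns to %o7 + 8 == 0x1008
 * (one slot for the call, one for its delay slot).  Storing 0x1008 in
 * ri->ret_addr lets trampoline_probe_handler() load it straight into
 * tpc/tnpc, while pointing %o7 at kretprobe_trampoline - 8 makes the
 * callee's "return to %o7 + 8" land exactly on the trampoline.
 */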

/*
 * Called when the probe at the kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	unsigned long orig_ret_address = 0;

	orig_ret_address = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
	regs->tpc = orig_ret_address;
	regs->tnpc = orig_ret_address + 4;

	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}

static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline:\n"
		     "\tnop\n"
		     "\tnop\n");
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}