Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5 / 5B / 5 Plus boards

arch/powerpc/kernel/uprobes.c (git blame: all lines from commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300):
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * User-space Probes (UProbes) for powerpc
 *
 * Copyright IBM Corporation, 2007-2012
 *
 * Adapted from the x86 port by Ananth N Mavinakayanahalli <ananth@in.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/sstep.h>
#include <asm/inst.h>

#define UPROBE_TRAP_NR	UINT_MAX

/**
 * is_trap_insn - check if the instruction is a trap variant
 * @insn: instruction to be checked.
 * Returns true if @insn is a trap variant.
 */
bool is_trap_insn(uprobe_opcode_t *insn)
{
	return (is_trap(*insn));
}

/**
 * arch_uprobe_analyze_insn
 * @auprobe: the probepoint information.
 * @mm: the probed address space.
 * @addr: vaddr to probe.
 * Return 0 on success or a negative errno on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
		struct mm_struct *mm, unsigned long addr)
{
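	/* powerpc instructions are word (4-byte) aligned; reject anything else. */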
	if (addr & 0x03)
		return -EINVAL;

	return 0;
}

/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct arch_uprobe_task *autask = &current->utask->autask;

	autask->saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;
	regs->nip = current->utask->xol_vaddr;

	user_enable_single_step(current);
	return 0;
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

/*
 * If xol insn itself traps and generates a signal (SIGILL/SIGSEGV/etc),
 * then detect the case where a singlestepped instruction jumps back to its
 * own address. It is assumed that anything like do_page_fault/do_trap/etc
 * sets thread.trap_nr != UINT_MAX.
 *
 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
 * UPROBE_TRAP_NR == UINT_MAX set by arch_uprobe_pre_xol().
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.trap_nr != UPROBE_TRAP_NR)
		return true;

	return false;
}

/*
 * Called after single-stepping. To avoid the SMP problems that can
 * occur when we temporarily put back the original opcode to
 * single-step, we single-stepped a copy of the instruction.
 *
 * This function prepares to resume execution after the single-step.
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);

	current->thread.trap_nr = utask->autask.saved_trap_nr;

	/*
	 * On powerpc, except for loads and stores, most instructions
	 * including ones that alter code flow (branches, calls, returns)
	 * are emulated in the kernel. We get here only if the emulation
	 * support doesn't exist, and we have to fix up the next
	 * instruction to be executed.
	 */
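	/*
	 * ppc_inst_next() advances utask->vaddr (the original probed
	 * address, not the XOL slot) by the length of the copied
	 * instruction: 4 bytes, or 8 for a prefixed instruction.
	 */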
	regs->nip = (unsigned long)ppc_inst_next((void *)utask->vaddr, &auprobe->insn);

	user_disable_single_step(current);
	return 0;
}

/* callback routine for handling exceptions. */
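/*
 * DIE_BPT is raised when a task hits the trap instruction planted at the
 * probed address; DIE_SSTEP is raised when the single-stepped XOL
 * instruction completes.
 */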
int arch_uprobe_exception_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	/* regs == NULL is a kernel bug */
	if (WARN_ON(!regs))
		return NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (!user_mode(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_BPT:
		if (uprobe_pre_sstep_notifier(regs))
			return NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (uprobe_post_sstep_notifier(regs))
			return NOTIFY_STOP;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/*
 * This function gets called when the XOL instruction either gets trapped or
 * the thread has a fatal signal, so reset the instruction pointer to its
 * probed address.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	instruction_pointer_set(regs, utask->vaddr);

	user_disable_single_step(current);
}

/*
 * See if the instruction can be emulated.
 * Returns true if instruction was emulated, false otherwise.
 */
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	int ret;

	/*
	 * emulate_step() returns 1 if the insn was successfully emulated.
	 * For all other cases, we need to single-step in hardware.
	 */
	ret = emulate_step(regs, ppc_inst_read(&auprobe->insn));
	if (ret > 0)
		return true;

	return false;
}

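/*
 * On powerpc the return address of a function call is held in the link
 * register (LR), exposed as regs->link, rather than on the stack, so
 * hijacking it for a uretprobe is a simple register swap.
 */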
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
	unsigned long orig_ret_vaddr;

	orig_ret_vaddr = regs->link;

	/* Replace the return addr with trampoline addr */
	regs->link = trampoline_vaddr;

	return orig_ret_vaddr;
}

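/*
 * gpr[1] is the stack pointer (r1) on powerpc and the stack grows down:
 * a pending return_instance is considered live only while the current
 * stack pointer is still at or below the value recorded when its return
 * address was hijacked (strictly below, except for chained calls).
 */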
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
				struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CHAIN_CALL)
		return regs->gpr[1] <= ret->stack;
	else
		return regs->gpr[1] < ret->stack;
}
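
For context, the generic uprobe machinery this powerpc glue backs can be exercised from user space through the tracefs uprobe_events interface. Below is a minimal sketch, assuming tracefs is mounted at /sys/kernel/tracing, root privileges, and a placeholder target (/bin/bash at offset 0x4710) that must be replaced with a real binary path and probe offset:

/* demo_uprobe.c - register and enable a uprobe via tracefs (sketch). */
#include <stdio.h>
#include <stdlib.h>

/* Write a single string followed by a newline to the given tracefs file. */
static int write_str(const char *path, const char *s)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s\n", s);
	return fclose(f);
}

int main(void)
{
	/* Define a probe named "demo" (placeholder binary and offset). */
	if (write_str("/sys/kernel/tracing/uprobe_events",
		      "p:uprobes/demo /bin/bash:0x4710"))
		return EXIT_FAILURE;

	/* Enable it; hits are reported in /sys/kernel/tracing/trace. */
	if (write_str("/sys/kernel/tracing/events/uprobes/demo/enable", "1"))
		return EXIT_FAILURE;

	return EXIT_SUCCESS;
}

Each hit on the enabled probe goes through the breakpoint/XOL path implemented above (or is emulated via emulate_step()) before the traced task resumes.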