Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 tree for the Orange Pi 5 / 5B / 5 Plus boards.

Repository stats: 3 commits, 0 branches, 0 tags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright (C) 2008 Matt Fleming <matt@console-pimps.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * Copyright (C) 2008 Paul Mundt <lethal@linux-sh.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Code for replacing ftrace calls with jumps.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * Thanks goes to Ingo Molnar, for suggesting the idea.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * Mathieu Desnoyers, for suggesting postponing the modifications.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  * Arjan van de Ven, for keeping me straight, and explaining to me
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * the dangers of modifying code on the run.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/ftrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <asm/ftrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <asm/unistd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <trace/syscall.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Scratch buffer holding the "call" instruction image built by
 * ftrace_call_replace().
 */
static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];

/*
 * Scratch buffer holding the "nop" replacement built by
 * ftrace_nop_replace(); on this architecture the replacement is really
 * a 4-byte literal address (see the comment below).
 */
static unsigned char ftrace_nop[4];
/*
 * If we're trying to nop out a call to a function, we instead
 * place a call to the address after the memory table.
 *
 * 8c011060 <a>:
 * 8c011060:       02 d1           mov.l   8c01106c <a+0xc>,r1
 * 8c011062:       22 4f           sts.l   pr,@-r15
 * 8c011064:       02 c7           mova    8c011070 <a+0x10>,r0
 * 8c011066:       2b 41           jmp     @r1
 * 8c011068:       2a 40           lds     r0,pr
 * 8c01106a:       09 00           nop
 * 8c01106c:       68 24           .word 0x2468     <--- ip
 * 8c01106e:       1d 8c           .word 0x8c1d
 * 8c011070:       26 4f           lds.l   @r15+,pr <--- ip + MCOUNT_INSN_SIZE
 *
 * We write 0x8c011070 to 0x8c01106c so that on entry to a() we branch
 * past the _mcount call and continue executing code like normal.
 */
/*
 * Build the "nop" image for the call site at @ip: the literal slot that
 * the mcount stub loads is set to ip + MCOUNT_INSN_SIZE, so the stub
 * branches straight past the _mcount call (see diagram above).
 *
 * Returns a pointer to the static 4-byte ftrace_nop buffer; per the note
 * in ftrace_call_replace(), callers run via kstop_machine, so the shared
 * buffer needs no locking.
 */
static unsigned char *ftrace_nop_replace(unsigned long ip)
{
	__raw_writel(ip + MCOUNT_INSN_SIZE, ftrace_nop);
	return ftrace_nop;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
/*
 * Build the "call" image for a call site: place @addr (the tracer entry
 * point) in the literal slot that the mcount stub loads.
 *
 * @ip is unused here; it is kept for signature symmetry with
 * ftrace_nop_replace().  Returns a pointer to the static
 * ftrace_replaced_code buffer.
 */
static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	/* Place the address in the memory table. */
	__raw_writel(addr, ftrace_replaced_code);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return ftrace_replaced_code;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67)  * Modifying code must take extra care. On an SMP machine, if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68)  * the code being modified is also being executed on another CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69)  * that CPU will have undefined results and possibly take a GPF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70)  * We use kstop_machine to stop other CPUS from exectuing code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71)  * But this does not stop NMIs from happening. We still need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72)  * to protect against that. We separate out the modification of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73)  * the code to take care of this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75)  * Two buffers are added: An IP buffer and a "code" buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77)  * 1) Put the instruction pointer into the IP buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78)  *    and the new code into the "code" buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79)  * 2) Wait for any running NMIs to finish and set a flag that says
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80)  *    we are modifying code, it is done in an atomic operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81)  * 3) Write the code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82)  * 4) clear the flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83)  * 5) Wait for any running NMIs to finish.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85)  * If an NMI is executed, the first thing it does is to call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86)  * "ftrace_nmi_enter". This will check if the flag is set to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87)  * and if it is, it will write what is in the IP and "code" buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89)  * The trick is, it does not matter if everyone is writing the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90)  * content to the code location. Also, if a CPU is executing code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91)  * it is OK to write to that code location if the contents being written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92)  * are the same as what exists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93)  */
#define MOD_CODE_WRITE_FLAG (1 << 31)	/* set when NMI should do the write */
/*
 * nmi_running counts CPUs currently inside an NMI handler; its top bit
 * (MOD_CODE_WRITE_FLAG) doubles as the "modification in progress" flag
 * described in the protocol comment above.
 * NOTE(review): (1 << 31) shifts into the sign bit — standard-C UB, but
 * the kernel builds with wrap-defined signed overflow; confirm before
 * reusing outside a kernel tree.
 */
static atomic_t nmi_running = ATOMIC_INIT(0);
static int mod_code_status;		/* holds return value of text write */
static void *mod_code_ip;		/* holds the IP to write to */
static void *mod_code_newcode;		/* holds the text to write to the IP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) static void clear_mod_flag(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	int old = atomic_read(&nmi_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 		int new = old & ~MOD_CODE_WRITE_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 		if (old == new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 		old = atomic_cmpxchg(&nmi_running, old, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) static void ftrace_mod_code(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	 * Yes, more than one CPU process can be writing to mod_code_status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	 *    (and the code itself)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	 * But if one were to fail, then they all should, and if one were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	 * to succeed, then they all should.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	mod_code_status = copy_to_kernel_nofault(mod_code_ip, mod_code_newcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 					     MCOUNT_INSN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	/* if we fail, then kill any new writers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	if (mod_code_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 		clear_mod_flag();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 
/*
 * NMI entry hook: bump the NMI nesting count and, if a code
 * modification is pending (MOD_CODE_WRITE_FLAG set), perform the write
 * ourselves so this NMI never executes half-modified text.
 */
void arch_ftrace_nmi_enter(void)
{
	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
		/*
		 * Pairs with the first smp_mb() in do_ftrace_mod_code():
		 * the IP/code buffers must be visible before we copy them.
		 */
		smp_rmb();
		ftrace_mod_code();
	}
	/* Must have previous changes seen before executions */
	smp_mb();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 
/*
 * NMI exit hook: order everything executed inside the NMI before
 * dropping our slot in nmi_running, so the modifying CPU's
 * wait_for_nmi() cannot return while we are still running code.
 */
void arch_ftrace_nmi_exit(void)
{
	/* Finish all executions before clearing nmi_running */
	smp_mb();
	atomic_dec(&nmi_running);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) static void wait_for_nmi_and_set_mod_flag(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	} while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) static void wait_for_nmi(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	if (!atomic_read(&nmi_running))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	} while (atomic_read(&nmi_running));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 
/*
 * NMI-safe text write, implementing the five-step protocol described
 * in the big comment above: stage the buffers, wait for NMIs and set
 * the write flag, write, clear the flag, wait for NMIs again.
 * Returns the copy_to_kernel_nofault() status (0 on success).
 */
static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
	mod_code_ip = (void *)ip;
	mod_code_newcode = new_code;

	/* The buffers need to be visible before we let NMIs write them */
	smp_mb();

	wait_for_nmi_and_set_mod_flag();

	/* Make sure all running NMIs have finished before we write the code */
	smp_mb();

	ftrace_mod_code();

	/* Make sure the write happens before clearing the bit */
	smp_mb();

	clear_mod_flag();
	wait_for_nmi();

	return mod_code_status;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 		       unsigned char *new_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	unsigned char replaced[MCOUNT_INSN_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 	 * Note:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 	 * We are paranoid about modifying text, as if a bug was to happen, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 	 * could cause us to read or write to someplace that could cause harm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	 * Carefully read and modify the code with probe_kernel_*(), and make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 	 * sure what we read is what we expected it to be before modifying it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	/* read the text we want to modify */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 	if (copy_from_kernel_nofault(replaced, (void *)ip, MCOUNT_INSN_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 	/* Make sure it is what we expect it to be */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	/* replace the text with the new text */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	if (do_ftrace_mod_code(ip, new_code))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) int ftrace_update_ftrace_func(ftrace_func_t func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	unsigned long ip = (unsigned long)(&ftrace_call) + MCOUNT_INSN_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	unsigned char old[MCOUNT_INSN_SIZE], *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 	memcpy(old, (unsigned char *)ip, MCOUNT_INSN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 	new = ftrace_call_replace(ip, (unsigned long)func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 	return ftrace_modify_code(ip, old, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) int ftrace_make_nop(struct module *mod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 		    struct dyn_ftrace *rec, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	unsigned char *new, *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 	unsigned long ip = rec->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	old = ftrace_call_replace(ip, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	new = ftrace_nop_replace(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	return ftrace_modify_code(rec->ip, old, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 	unsigned char *new, *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	unsigned long ip = rec->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	old = ftrace_nop_replace(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 	new = ftrace_call_replace(ip, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	return ftrace_modify_code(rec->ip, old, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 
/*
 * Arch-specific dynamic-ftrace initialisation: nothing to do here.
 */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) #endif /* CONFIG_DYNAMIC_FTRACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) #ifdef CONFIG_DYNAMIC_FTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) extern void ftrace_graph_call(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 
/*
 * Swap the 32-bit literal at @ip from @old_addr to @new_addr, verifying
 * the current contents first.  Used to flip the graph-caller literal
 * between skip_trace and ftrace_graph_caller.
 *
 * Returns 0 on success, -EFAULT if @ip cannot be read, -EINVAL when the
 * value currently there is not @old_addr.
 *
 * NOTE(review): the write is a plain __raw_writel with no icache flush,
 * unlike ftrace_modify_code() — presumably acceptable because only a
 * data literal changes; confirm against the architecture's cache model.
 */
static int ftrace_mod(unsigned long ip, unsigned long old_addr,
		      unsigned long new_addr)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	/* Fault-safe read of the current literal. */
	if (copy_from_kernel_nofault(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (old_addr != __raw_readl((unsigned long *)code))
		return -EINVAL;

	__raw_writel(new_addr, ip);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) int ftrace_enable_ftrace_graph_caller(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 	unsigned long ip, old_addr, new_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 	old_addr = (unsigned long)(&skip_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	new_addr = (unsigned long)(&ftrace_graph_caller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 	return ftrace_mod(ip, old_addr, new_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) int ftrace_disable_ftrace_graph_caller(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 	unsigned long ip, old_addr, new_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 	old_addr = (unsigned long)(&ftrace_graph_caller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 	new_addr = (unsigned long)(&skip_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	return ftrace_mod(ip, old_addr, new_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) #endif /* CONFIG_DYNAMIC_FTRACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)  * Hook the return address and push it in the stack of return addrs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)  * in the current thread info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)  * This is the main routine for the function graph tracer. The function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)  * graph tracer essentially works like this:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)  * parent is the stack address containing self_addr's return address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312)  * We pull the real return address out of parent and store it in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)  * current's ret_stack. Then, we replace the return address on the stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314)  * with the address of return_to_handler. self_addr is the function that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)  * called mcount.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)  * When self_addr returns, it will jump to return_to_handler which calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)  * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)  * return address off of current's ret_stack and jump to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)  */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;	/* original return address read from *parent */
	int faulted;		/* set to 1 by the fixup path on a fault */
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	/* Tracer shut itself down after an internal error: do nothing. */
	if (unlikely(ftrace_graph_is_dead()))
		return;

	/* Graph tracing is paused for this task. */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against fault, even if it shouldn't
	 * happen. This tool is too much intrusive to
	 * ignore such a protection.
	 */
	/*
	 * Effectively: old = *parent; *parent = return_hooker;
	 * faulted = 0;  Both memory accesses (labels 1 and 2) carry
	 * __ex_table entries pointing at the fixup (label 4), which
	 * sets faulted = 1 and resumes after the asm (label 3).
	 */
	__asm__ __volatile__(
		"1:						\n\t"
		"mov.l		@%2, %0				\n\t"
		"2:						\n\t"
		"mov.l		%3, @%2				\n\t"
		"mov		#0, %1				\n\t"
		"3:						\n\t"
		".section .fixup, \"ax\"			\n\t"
		"4:						\n\t"
		"mov.l		5f, %0				\n\t"
		"jmp		@%0				\n\t"
		" mov		#1, %1				\n\t"
		".balign 4					\n\t"
		"5:	.long 3b				\n\t"
		".previous					\n\t"
		".section __ex_table,\"a\"			\n\t"
		".long 1b, 4b					\n\t"
		".long 2b, 4b					\n\t"
		".previous					\n\t"
		: "=&r" (old), "=r" (faulted)
		: "r" (parent), "r" (return_hooker)
	);

	/* A fault while hooking the return address is fatal to tracing. */
	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	/*
	 * Hand the real return address to the graph tracer; if it
	 * rejects the entry, restore *parent so the function returns
	 * normally instead of through return_to_handler.
	 */
	if (function_graph_enter(old, self_addr, 0, NULL))
		__raw_writel(old, parent);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */