// SPDX-License-Identifier: GPL-2.0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86, which was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 *
 */

#define pr_fmt(fmt) "ftrace-powerpc: " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/asm-prototypes.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
#include <asm/inst.h>


#ifdef CONFIG_DYNAMIC_FTRACE

/*
 * We generally only have a single long_branch tramp and at most 2 or 3 plt
 * tramps generated. But, we don't use the plt tramps currently. We also allot
 * 2 tramps after .text and .init.text. So, we only end up with around 3 usable
 * tramps in total. Set aside 8 just to be sure.
 */
#define NUM_FTRACE_TRAMPS	8
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];

static struct ppc_inst
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	struct ppc_inst op;

	addr = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	create_branch(&op, (struct ppc_inst *)ip, addr, link ? 1 : 0);

	return op;
}

static int
ftrace_modify_code(unsigned long ip, struct ppc_inst old, struct ppc_inst new)
{
	struct ppc_inst replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with probe_kernel_*(), and make
	 * sure what we read is what we expected it to be before modifying it.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read_inst(&replaced, (void *)ip))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (!ppc_inst_equal(replaced, old)) {
		pr_err("%p: replaced (%s) != old (%s)",
		       (void *)ip, ppc_inst_as_str(replaced), ppc_inst_as_str(old));
		return -EINVAL;
	}

	/* replace the text with the new text */
	if (patch_instruction((struct ppc_inst *)ip, new))
		return -EPERM;

	return 0;
}

/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	struct ppc_inst op;
	addr = ppc_function_entry((void *)addr);

	/* use create_branch() to verify that this offset can be reached with a branch */
	return create_branch(&op, (struct ppc_inst *)ip, addr, 0) == 0;
}

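/*
 * Relative branches use primary opcode 18: the top six bits are 0b010010
 * (0x48000000 with the AA and LK bits clear). Masking with 0xfc000003 keeps
 * the opcode plus the AA/LK bits, so 0x48000001 matches a relative 'bl'
 * (LK=1) and 0x48000000 matches a relative 'b'.
 */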
static int is_bl_op(struct ppc_inst op)
{
	return (ppc_inst_val(op) & 0xfc000003) == 0x48000001;
}

static int is_b_op(struct ppc_inst op)
{
	return (ppc_inst_val(op) & 0xfc000003) == 0x48000000;
}

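/*
 * Recover the target of a relative b/bl: mask out the 26-bit displacement
 * (the 24-bit LI field already shifted left by 2), sign-extend it using the
 * top displacement bit (0x02000000), and add it to the branch's own address.
 */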
static unsigned long find_bl_target(unsigned long ip, struct ppc_inst op)
{
	int offset;

	offset = (ppc_inst_val(op) & 0x03fffffc);
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
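/*
 * Patch the module call site at rec->ip back to a no-op. The site currently
 * holds a 'bl' to a module trampoline; we verify that the trampoline leads
 * to the expected address before touching anything. The replacement differs
 * by ABI: a plain nop with -mprofile-kernel, or a 'b +8' over the TOC reload
 * with the older -pg style call sequence (see the comments below).
 */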
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long entry, ptr, tramp;
	unsigned long ip = rec->ip;
	struct ppc_inst op, pop;

	/* read where this goes */
	if (probe_kernel_read_inst(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

#ifdef CONFIG_MPROFILE_KERNEL
	/* When using -mprofile-kernel there is no load to jump over */
	pop = ppc_inst(PPC_INST_NOP);

	if (probe_kernel_read_inst(&op, (void *)(ip - 4))) {
		pr_err("Fetching instruction at %lx failed.\n", ip - 4);
		return -EFAULT;
	}

	/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
	if (!ppc_inst_equal(op, ppc_inst(PPC_INST_MFLR)) &&
	    !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
		pr_err("Unexpected instruction %s around bl _mcount\n",
		       ppc_inst_as_str(op));
		return -EINVAL;
	}
#else
	/*
	 * Our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we can not simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */

	pop = ppc_inst(PPC_INST_BRANCH | 8);	/* b +8 */

	/*
	 * Check what is in the next instruction. We can see ld r2,40(r1), but
	 * on first pass after boot we will see mflr r0.
	 */
	if (probe_kernel_read_inst(&op, (void *)(ip + 4))) {
		pr_err("Fetching op failed.\n");
		return -EFAULT;
	}

	if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
		pr_err("Expected %08x found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
		return -EINVAL;
	}
#endif /* CONFIG_MPROFILE_KERNEL */

	if (patch_instruction((struct ppc_inst *)ip, pop)) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}

#else /* !PPC64 */
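/*
 * PPC32 version: the call site branches to a module trampoline that loads
 * the target into r12 and does a bctr. Verify the trampoline shape and its
 * destination, then replace the 'bl' with a nop.
 */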
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	struct ppc_inst op;
	unsigned int jmp[4];
	unsigned long ip = rec->ip;
	unsigned long tramp;

	if (copy_from_kernel_nofault(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC32 the trampoline looks like:
	 * 0x3d, 0x80, 0x00, 0x00  lis   r12,sym@ha
	 * 0x39, 0x8c, 0x00, 0x00  addi  r12,r12,sym@l
	 * 0x7d, 0x89, 0x03, 0xa6  mtctr r12
	 * 0x4e, 0x80, 0x04, 0x20  bctr
	 */

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Find where the trampoline jumps to */
	if (copy_from_kernel_nofault(jmp, (void *)tramp, sizeof(jmp))) {
		pr_err("Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x ", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0x7d8903a6) ||
	    (jmp[3] != 0x4e800420)) {
		pr_err("Not a trampoline\n");
		return -EINVAL;
	}

	tramp = (jmp[1] & 0xffff) |
		((jmp[0] & 0xffff) << 16);
	if (tramp & 0x8000)
		tramp -= 0x10000;

	pr_devel(" %lx ", tramp);

	if (tramp != addr) {
		pr_err("Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = ppc_inst(PPC_INST_NOP);

	if (patch_instruction((struct ppc_inst *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* PPC64 */
#endif /* CONFIG_MODULES */

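/* Return a recorded ftrace trampoline that is reachable from ip, or 0 if none is. */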
static unsigned long find_ftrace_tramp(unsigned long ip)
{
	int i;
	struct ppc_inst instr;

	/*
	 * We have the compiler-generated long_branch tramps at the end,
	 * and we prefer those.
	 */
	for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
		if (!ftrace_tramps[i])
			continue;
		else if (create_branch(&instr, (void *)ip,
				       ftrace_tramps[i], 0) == 0)
			return ftrace_tramps[i];

	return 0;
}

static int add_ftrace_tramp(unsigned long tramp)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i]) {
			ftrace_tramps[i] = tramp;
			return 0;
		}

	return -1;
}

/*
 * If this is a compiler generated long_branch trampoline (essentially, a
 * trampoline that has a branch to _mcount()), we rewrite the branch to
 * instead go to ftrace_[regs_]caller() and note down the location of this
 * trampoline.
 */
static int setup_mcount_compiler_tramp(unsigned long tramp)
{
	int i;
	struct ppc_inst op;
	unsigned long ptr;
	struct ppc_inst instr;
	static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS];

	/* Is this a known long jump tramp? */
	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i])
			break;
		else if (ftrace_tramps[i] == tramp)
			return 0;

	/* Is this a known plt tramp? */
	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_plt_tramps[i])
			break;
		else if (ftrace_plt_tramps[i] == tramp)
			return -1;

	/* New trampoline -- read where this goes */
	if (probe_kernel_read_inst(&op, (void *)tramp)) {
		pr_debug("Fetching opcode failed.\n");
		return -1;
	}

	/* Is this a 24 bit branch? */
	if (!is_b_op(op)) {
		pr_debug("Trampoline is not a long branch tramp.\n");
		return -1;
	}

	/* let's find where the pointer goes */
	ptr = find_bl_target(tramp, op);

	if (ptr != ppc_global_function_entry((void *)_mcount)) {
		pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr);
		return -1;
	}

	/* Let's rewrite the tramp to go to ftrace_[regs_]caller */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
#else
	ptr = ppc_global_function_entry((void *)ftrace_caller);
#endif
	if (create_branch(&instr, (void *)tramp, ptr, 0)) {
		pr_debug("%ps is not reachable from existing mcount tramp\n",
			 (void *)ptr);
		return -1;
	}

	if (patch_branch((struct ppc_inst *)tramp, ptr, 0)) {
		pr_debug("REL24 out of range!\n");
		return -1;
	}

	if (add_ftrace_tramp(tramp)) {
		pr_debug("No tramp locations left\n");
		return -1;
	}

	return 0;
}

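/*
 * Nop out a call site in core kernel text. If the site still branches to a
 * compiler-generated long_branch trampoline, redirect that trampoline to
 * ftrace_[regs_]caller first so it can be reused later; otherwise make sure
 * some other ftrace trampoline is reachable before we lose the branch.
 */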
static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long tramp, ip = rec->ip;
	struct ppc_inst op;

	/* Read where this goes */
	if (probe_kernel_read_inst(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* Let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (setup_mcount_compiler_tramp(tramp)) {
		/* Are other trampolines reachable? */
		if (!find_ftrace_tramp(ip)) {
			pr_err("No ftrace trampolines reachable from %ps\n",
			       (void *)ip);
			return -EINVAL;
		}
	}

	if (patch_instruction((struct ppc_inst *)ip, ppc_inst(PPC_INST_NOP))) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}

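/*
 * ftrace_make_nop() is the arch hook that converts the ftrace call at
 * rec->ip (currently calling 'addr') into a no-op. Near calls are patched
 * directly; far calls go through the kernel or module specific helpers
 * above.
 */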
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	struct ppc_inst old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = ppc_inst(PPC_INST_NOP);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip))
		return __ftrace_make_nop_kernel(rec, addr);

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			pr_err("No module loaded addr=%lx\n", addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			pr_err("Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
/*
 * Examine the existing instructions for __ftrace_make_call.
 * They should effectively be a NOP, and follow formal constraints,
 * depending on the ABI. Return false if they don't.
 */
#ifndef CONFIG_MPROFILE_KERNEL
static int
expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1)
{
	/*
	 * We expect to see:
	 *
	 * b +8
	 * ld r2,XX(r1)
	 *
	 * The load offset is different depending on the ABI. For simplicity
	 * just mask it out when doing the compare.
	 */
	if (!ppc_inst_equal(op0, ppc_inst(0x48000008)) ||
	    (ppc_inst_val(op1) & 0xffff0000) != 0xe8410000)
		return 0;
	return 1;
}
#else
static int
expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1)
{
	/* look for patched "NOP" on ppc64 with -mprofile-kernel */
	if (!ppc_inst_equal(op0, ppc_inst(PPC_INST_NOP)))
		return 0;
	return 1;
}
#endif

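/*
 * Turn the (effective) nop at a module call site into a 'bl' to the module's
 * ftrace trampoline, after checking that the trampoline really leads to the
 * requested ftrace handler.
 */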
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ppc_inst op[2];
	struct ppc_inst instr;
	void *ip = (void *)rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* read where this goes */
	if (probe_kernel_read_inst(op, ip))
		return -EFAULT;

	if (probe_kernel_read_inst(op + 1, ip + 4))
		return -EFAULT;

	if (!expected_nop_sequence(ip, op[0], op[1])) {
		pr_err("Unexpected call sequence at %p: %s %s\n",
		       ip, ppc_inst_as_str(op[0]), ppc_inst_as_str(op[1]));
		return -EINVAL;
	}

	/* If we never set up ftrace trampoline(s), then bail */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
#else
	if (!mod->arch.tramp) {
#endif
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
#endif
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	/* Ensure branch is within 24 bits */
	if (create_branch(&instr, ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Branch out of range\n");
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}

#else /* !CONFIG_PPC64 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	int err;
	struct ppc_inst op;
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read_inst(&op, (void *)ip))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (!ppc_inst_equal(op, ppc_inst(PPC_INST_NOP))) {
		pr_err("Expected NOP but have %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	err = create_branch(&op, (struct ppc_inst *)ip,
			    rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (err) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (patch_instruction((struct ppc_inst *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */

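/*
 * Same as above, but for call sites in core kernel text: branch to one of
 * the trampolines recorded in ftrace_tramps[] rather than a per-module one.
 */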
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ppc_inst op;
	void *ip = (void *)rec->ip;
	unsigned long tramp, entry, ptr;

	/* Make sure we're being asked to patch a branch to a known ftrace addr */
	entry = ppc_global_function_entry((void *)ftrace_caller);
	ptr = ppc_global_function_entry((void *)addr);

	if (ptr != entry) {
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
		entry = ppc_global_function_entry((void *)ftrace_regs_caller);
		if (ptr != entry) {
#endif
			pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
			return -EINVAL;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
		}
#endif
	}

	/* Make sure we have a nop */
	if (probe_kernel_read_inst(&op, ip)) {
		pr_err("Unable to read ftrace location %p\n", ip);
		return -EFAULT;
	}

	if (!ppc_inst_equal(op, ppc_inst(PPC_INST_NOP))) {
		pr_err("Unexpected call sequence at %p: %s\n", ip, ppc_inst_as_str(op));
		return -EINVAL;
	}

	tramp = find_ftrace_tramp((unsigned long)ip);
	if (!tramp) {
		pr_err("No ftrace trampolines reachable from %ps\n", ip);
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Error patching branch to ftrace tramp!\n");
		return -EINVAL;
	}

	return 0;
}

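/*
 * ftrace_make_call() is the arch hook that enables tracing of a site: the
 * nop at rec->ip is replaced by a call to 'addr', directly if it is within
 * branch range, otherwise via a trampoline.
 */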
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	struct ppc_inst old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ppc_inst(PPC_INST_NOP);
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip))
		return __ftrace_make_call_kernel(rec, addr);

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Since we are converting from a nop, the record had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#ifdef CONFIG_MODULES
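/*
 * Retarget an already-enabled module call site from old_addr to addr,
 * typically to switch between the normal and the pt_regs-saving ftrace
 * trampolines.
 */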
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) struct ppc_inst op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) unsigned long ip = rec->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) unsigned long entry, ptr, tramp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) struct module *mod = rec->arch.mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) /* If we never set up ftrace trampolines, then bail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) if (!mod->arch.tramp || !mod->arch.tramp_regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) pr_err("No ftrace trampoline\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) /* read where this goes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) if (probe_kernel_read_inst(&op, (void *)ip)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) pr_err("Fetching opcode failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) /* Make sure that that this is still a 24bit jump */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) if (!is_bl_op(op)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) /* lets find where the pointer goes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) tramp = find_bl_target(ip, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) entry = ppc_global_function_entry((void *)old_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) pr_devel("ip:%lx jumps to %lx", ip, tramp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) if (tramp != entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) /* old_addr is not within range, so we must have used a trampoline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (module_trampoline_target(mod, tramp, &ptr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) pr_err("Failed to get trampoline target\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) pr_devel("trampoline target %lx", ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) /* This should match what was called */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) if (ptr != entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) pr_err("addr %lx does not match expected %lx\n", ptr, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) /* The new target may be within range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (test_24bit_addr(ip, addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) /* within range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) if (patch_branch((struct ppc_inst *)ip, addr, BRANCH_SET_LINK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) pr_err("REL24 out of range!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (rec->flags & FTRACE_FL_REGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) tramp = mod->arch.tramp_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) tramp = mod->arch.tramp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (module_trampoline_target(mod, tramp, &ptr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) pr_err("Failed to get trampoline target\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) pr_devel("trampoline target %lx", ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) entry = ppc_global_function_entry((void *)addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) /* This should match what was called */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (ptr != entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) pr_err("addr %lx does not match expected %lx\n", ptr, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) /* Ensure branch is within 24 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (create_branch(&op, (struct ppc_inst *)ip, tramp, BRANCH_SET_LINK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) pr_err("Branch out of range\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) if (patch_branch((struct ppc_inst *)ip, tramp, BRANCH_SET_LINK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) pr_err("REL24 out of range!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) unsigned long ip = rec->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct ppc_inst old, new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * If the calling address is more that 24 bits away,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * then we had to use a trampoline to make the call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * Otherwise just update the call site.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) /* within range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) old = ftrace_call_replace(ip, old_addr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) new = ftrace_call_replace(ip, addr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) return ftrace_modify_code(ip, old, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) } else if (core_kernel_text(ip)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * We always patch out of range locations to go to the regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * variant, so there is nothing to do here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) #ifdef CONFIG_MODULES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * Out-of-range branches like this are only expected to come from module code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (!rec->arch.mod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) pr_err("No module loaded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) return __ftrace_modify_call(rec, old_addr, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) /* We should not get here without modules */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) #endif /* CONFIG_MODULES */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
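/*
 * ftrace_call (and, with CONFIG_DYNAMIC_FTRACE_WITH_REGS, ftrace_regs_call)
 * are branch sites inside the assembly ftrace trampolines. The ftrace core
 * calls this whenever the active callback changes, so both sites are patched
 * to branch to the current handler.
 */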
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) int ftrace_update_ftrace_func(ftrace_func_t func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) unsigned long ip = (unsigned long)(&ftrace_call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) struct ppc_inst old, new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) old = ppc_inst_read((struct ppc_inst *)&ftrace_call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) new = ftrace_call_replace(ip, (unsigned long)func, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) ret = ftrace_modify_code(ip, old, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) /* Also update the regs callback function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) ip = (unsigned long)(&ftrace_regs_call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) old = ppc_inst_read((struct ppc_inst *)&ftrace_regs_call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) new = ftrace_call_replace(ip, (unsigned long)func, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) ret = ftrace_modify_code(ip, old, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * Use the default ftrace_modify_all_code, but without stop_machine(): we only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * ever patch one aligned instruction at a time, which other CPUs see atomically.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) void arch_ftrace_update_code(int command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) ftrace_modify_all_code(command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) #define PACATOC offsetof(struct paca_struct, kernel_toc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) int __init ftrace_dyn_arch_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) u32 stub_insns[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 0xe98d0000 | PACATOC, /* ld r12,PACATOC(r13) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) 0x3d8c0000, /* addis r12,r12,<high> */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) 0x398c0000, /* addi r12,r12,<low> */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 0x7d8903a6, /* mtctr r12 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) 0x4e800420, /* bctr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) unsigned long addr = ppc_global_function_entry((void *)ftrace_regs_caller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) unsigned long addr = ppc_global_function_entry((void *)ftrace_caller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) long reladdr = addr - kernel_toc_addr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) pr_err("Address of %ps out of range of kernel_toc.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) (void *)addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
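/*
 * Patch the high and low halves of reladdr into the addis/addi pair; the
 * range check above keeps reladdr within the signed 32 bits (+/- 2 GB of the
 * kernel TOC) that the pair can express. Illustrative value: for
 * reladdr = 0x12348000, PPC_HA() gives 0x1235 (rounded up because addi
 * sign-extends its low 16 bits) and PPC_LO() gives 0x8000 (treated as
 * -0x8000), so the stub computes
 * r12 = kernel_toc + 0x12350000 - 0x8000 = kernel_toc + 0x12348000.
 */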
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) for (i = 0; i < 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) memcpy(tramp[i], stub_insns, sizeof(stub_insns));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) tramp[i][1] |= PPC_HA(reladdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) tramp[i][2] |= PPC_LO(reladdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) add_ftrace_tramp((unsigned long)tramp[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) int __init ftrace_dyn_arch_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) #endif /* CONFIG_DYNAMIC_FTRACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) extern void ftrace_graph_call(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) extern void ftrace_graph_stub(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
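/*
 * ftrace_graph_call is a branch site inside the ftrace_caller trampoline:
 * enabling the graph tracer points it at ftrace_graph_caller, disabling it
 * points it back at the fall-through ftrace_graph_stub.
 */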
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) int ftrace_enable_ftrace_graph_caller(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) unsigned long ip = (unsigned long)(&ftrace_graph_call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) unsigned long addr = (unsigned long)(&ftrace_graph_caller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) unsigned long stub = (unsigned long)(&ftrace_graph_stub);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) struct ppc_inst old, new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) old = ftrace_call_replace(ip, stub, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) new = ftrace_call_replace(ip, addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return ftrace_modify_code(ip, old, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) int ftrace_disable_ftrace_graph_caller(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) unsigned long ip = (unsigned long)(&ftrace_graph_call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) unsigned long addr = (unsigned long)(&ftrace_graph_caller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) unsigned long stub = (unsigned long)(&ftrace_graph_stub);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct ppc_inst old, new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) old = ftrace_call_replace(ip, addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) new = ftrace_call_replace(ip, stub, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return ftrace_modify_code(ip, old, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * Hook the return address and push it onto the return address stack of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * the current task. Return the address we want to divert to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) unsigned long sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) unsigned long return_hooker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (unlikely(ftrace_graph_is_dead()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (unlikely(atomic_read(&current->tracing_graph_pause)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) return_hooker = ppc_function_entry(return_to_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) parent = return_hooker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) return parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
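
/*
 * The diverted return lands in return_to_handler, which calls the graph
 * tracer core (ftrace_return_to_handler()) to fetch the saved parent address
 * and resumes execution there.
 */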
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) #ifdef PPC64_ELF_ABI_v1
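/*
 * With the v1 ELF ABI, text symbols carry a leading dot (".schedule") while
 * users filter on the plain name (e.g. "echo schedule > set_ftrace_filter"),
 * so skip the dot when the search pattern itself does not start with one.
 */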
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) char *arch_ftrace_match_adjust(char *str, const char *search)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (str[0] == '.' && search[0] != '.')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) return str + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) #endif /* PPC64_ELF_ABI_v1 */