// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
#include <asm/inst.h>

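/*
 * Offsets into the detour buffer template defined in optprobes_head.S.
 * Sketch of the template flow (label order as expected from how the
 * offsets below are patched; see arch_prepare_optimized_kprobe()):
 *
 *	optprobe_template_entry		save registers, set up pt_regs
 *	optprobe_template_op_address	load &optimized_kprobe into r3 (patched)
 *	optprobe_template_call_handler	bl optimized_callback (patched)
 *	optprobe_template_insn		load probed instruction into r4 (patched)
 *	optprobe_template_call_emulate	bl emulate_step (patched)
 *	optprobe_template_ret		b back past the probe point (patched)
 */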
#define TMPL_CALL_HDLR_IDX	\
	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	\
	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX		\
	(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX		\
	(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX		\
	(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX		\
	(optprobe_template_end - optprobe_template_entry)

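/*
 * Generates get_ppc_optinsn_slot() and free_ppc_optinsn_slot(), thin
 * wrappers around the slot cache declared below.
 */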
DEFINE_INSN_CACHE_OPS(ppc_optinsn);

static bool insn_page_in_use;

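/*
 * optinsn_slot is a single buffer reserved in optprobes_head.S so that
 * it lives in kernel text, within branch range of kernel probe points.
 * There is only the one, hence the insn_page_in_use guard.
 */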
static void *__ppc_alloc_insn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

static void __ppc_free_insn_page(void *page __maybe_unused)
{
	insn_page_in_use = false;
}

struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
	.pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
	/* insn_size initialized later */
	.alloc = __ppc_alloc_insn_page,
	.free = __ppc_free_insn_page,
	.nr_garbage = 0,
};

/*
 * Check if we can optimize this probe. Returns NIP post-emulation if this can
 * be optimized and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;

	/*
	 * A kprobe placed on the kretprobe trampoline during boot
	 * has a 'nop' instruction, which can always be emulated,
	 * so further checks can be skipped.
	 */
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return (unsigned long)p->addr + sizeof(kprobe_opcode_t);

	/*
	 * We only support optimizing kernel addresses, not
	 * module addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */
	if (!is_kernel_addr((unsigned long)p->addr))
		return 0;

	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = (unsigned long)p->addr;
	regs.trap = 0x0;
	regs.msr = MSR_KERNEL;

	/*
	 * Kprobes placed on conditional branch instructions are not
	 * optimized, as we can't predict the nip ahead of time with
	 * a dummy pt_regs, and so can't ensure that the branch back
	 * from the detour buffer falls within the 32MB branch range.
	 * A branch back from the trampoline is set up in the detour
	 * buffer to the nip returned by analyse_instr() here.
	 *
	 * Ensure that the instruction is not a conditional branch,
	 * and that it can be emulated.
	 */
	if (!is_conditional_branch(ppc_inst_read((struct ppc_inst *)p->ainsn.insn)) &&
	    analyse_instr(&op, &regs,
			  ppc_inst_read((struct ppc_inst *)p->ainsn.insn)) == 1) {
		emulate_update_regs(&regs, &op);
		nip = regs.nip;
	}

	return nip;
}

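/*
 * Entered from the detour buffer via the patched 'bl' at
 * TMPL_CALL_HDLR_IDX, with the optimized_kprobe pointer loaded into r3
 * (the first-argument register) by the imm64 sequence at TMPL_OP_IDX.
 */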
static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs->nip = (unsigned long)op->kp.addr;
		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_ppc_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}

/*
 * emulate_step() takes the instruction to be emulated as its
 * second parameter. Load register 'r4' with the instruction.
 */
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
	/* addis r4,0,(insn)@h */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_ADDIS | ___PPC_RT(4) |
				   ((val >> 16) & 0xffff)));
	addr++;

	/* ori r4,r4,(insn)@l */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_ORI | ___PPC_RA(4) |
				   ___PPC_RS(4) | (val & 0xffff)));
}
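
/*
 * Worked example (illustrative value only): patching val = 0x7c0802a6
 * (mflr r0) here would emit:
 *	addis	r4,0,0x7c08	; r4 = 0x7c080000
 *	ori	r4,r4,0x02a6	; r4 = 0x7c0802a6
 */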

/*
 * Generate instructions to load provided immediate 64-bit value
 * to register 'reg' and patch these instructions at 'addr'.
 */
void patch_imm64_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
	/* lis reg,(op)@highest */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_ADDIS | ___PPC_RT(reg) |
				   ((val >> 48) & 0xffff)));
	addr++;

	/* ori reg,reg,(op)@higher */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_ORI | ___PPC_RA(reg) |
				   ___PPC_RS(reg) | ((val >> 32) & 0xffff)));
	addr++;

	/* rldicr reg,reg,32,31 */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_RLDICR | ___PPC_RA(reg) |
				   ___PPC_RS(reg) | __PPC_SH64(32) | __PPC_ME64(31)));
	addr++;

	/* oris reg,reg,(op)@h */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_ORIS | ___PPC_RA(reg) |
				   ___PPC_RS(reg) | ((val >> 16) & 0xffff)));
	addr++;

	/* ori reg,reg,(op)@l */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_ORI | ___PPC_RA(reg) |
				   ___PPC_RS(reg) | (val & 0xffff)));
}
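
/*
 * Worked example (illustrative value only): for val = 0xc000000000abcdef
 * the sequence built above computes:
 *	lis	reg,0xc000	; reg = 0xffffffffc0000000 (sign-extended)
 *	ori	reg,reg,0x0000	; bits 47-32 of val
 *	rldicr	reg,reg,32,31	; reg = 0xc000000000000000 (shift into the
 *				;   high word, sign-extension junk cleared)
 *	oris	reg,reg,0x00ab	; reg = 0xc000000000ab0000
 *	ori	reg,reg,0xcdef	; reg = 0xc000000000abcdef
 */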

int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	struct ppc_inst branch_op_callback, branch_emulate_step, temp;
	kprobe_opcode_t *op_callback_addr, *emulate_step_addr, *buff;
	long b_offset;
	unsigned long nip, size;
	int rc, i;

	kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate instruction slot for detour buffer */
	buff = get_ppc_optinsn_slot();
	if (!buff)
		return -ENOMEM;

	/*
	 * OPTPROBE uses the 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively nearby, because the
	 * offset is encoded in a 24-bit immediate field of the branch
	 * opcode itself. The target must therefore be within 32MB on
	 * either side of the current instruction.
	 */
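	/*
	 * Range math, for reference: the 24-bit LI field is a signed
	 * word offset, so the reach is +/- 2^23 words = +/- 2^25 bytes,
	 * i.e. 32MB either way.
	 */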
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Check if the return address is also within 32MB range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
		   (unsigned long)nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Setup template */
	/* We can optimize this via patch_instruction_window later */
	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
	pr_devel("Copying template to %p, size %lu\n", buff, size);
	for (i = 0; i < size; i++) {
		rc = patch_instruction((struct ppc_inst *)(buff + i),
				       ppc_inst(*(optprobe_template_entry + i)));
		if (rc < 0)
			goto error;
	}

	/*
	 * Fixup the template with instructions to:
	 * 1. load the address of the actual probepoint
	 */
	patch_imm64_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX);

	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
	emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
		goto error;
	}

	rc = create_branch(&branch_op_callback,
			   (struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX),
			   (unsigned long)op_callback_addr,
			   BRANCH_SET_LINK);

	rc |= create_branch(&branch_emulate_step,
			    (struct ppc_inst *)(buff + TMPL_EMULATE_IDX),
			    (unsigned long)emulate_step_addr,
			    BRANCH_SET_LINK);

	if (rc)
		goto error;

	patch_instruction((struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX),
			  branch_op_callback);
	patch_instruction((struct ppc_inst *)(buff + TMPL_EMULATE_IDX),
			  branch_emulate_step);

	/*
	 * 3. load the instruction to be emulated into the relevant register
	 */
	temp = ppc_inst_read((struct ppc_inst *)p->ainsn.insn);
	patch_imm64_load_insns(ppc_inst_as_u64(temp), 4, buff + TMPL_INSN_IDX);

	/*
	 * 4. branch back from trampoline
	 */
	patch_branch((struct ppc_inst *)(buff + TMPL_RET_IDX), (unsigned long)nip, 0);

	flush_icache_range((unsigned long)buff,
			   (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;

	return 0;

error:
	free_ppc_optinsn_slot(buff, 0);
	return -ERANGE;
}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * On powerpc, an optprobe always replaces one instruction (4 bytes
 * aligned and 4 bytes long). It is impossible to encounter another
 * kprobe in this address range. So always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

void arch_optimize_kprobes(struct list_head *oplist)
{
	struct ppc_inst instr;
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Back up the instruction which will be replaced
		 * by the branch to the detour buffer.
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr,
		       RELATIVEJUMP_SIZE);
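		/*
		 * The branch is expected to fit:
		 * arch_prepare_optimized_kprobe() already verified that
		 * optinsn.insn lies within +/-32MB of kp.addr.
		 */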
		create_branch(&instr,
			      (struct ppc_inst *)op->kp.addr,
			      (unsigned long)op->optinsn.insn, 0);
		patch_instruction((struct ppc_inst *)op->kp.addr, instr);
		list_del_init(&op->list);
	}
}

void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}