^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * using the CPU's debug registers. Derived from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * "arch/x86/kernel/hw_breakpoint.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright 2010 IBM Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Author: K.Prasad <prasad@linux.vnet.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/hw_breakpoint.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/notifier.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/kprobes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/percpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <asm/hw_breakpoint.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <asm/sstep.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <asm/debug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <asm/debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <asm/hvcall.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <asm/inst.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * Stores the breakpoints currently in use on each breakpoint address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * register for every cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * Returns total number of data or instruction breakpoints available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) int hw_breakpoint_slots(int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) if (type == TYPE_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) return nr_wp_slots();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) return 0; /* no instruction breakpoints available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) static bool single_step_pending(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) if (current->thread.last_hit_ubp[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * Install a perf counter breakpoint.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * We seek a free debug address register and use it for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * breakpoint.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) * Atomic: we hold the counter->ctx->lock and we only handle variables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * and registers local to this cpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) int arch_install_hw_breakpoint(struct perf_event *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) struct arch_hw_breakpoint *info = counter_arch_bp(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) struct perf_event **slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) slot = this_cpu_ptr(&bp_per_reg[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) if (!*slot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) *slot = bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * Do not install DABR values if the instruction must be single-stepped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * If so, DABR will be populated in single_step_dabr_instruction().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) if (!single_step_pending())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) __set_breakpoint(i, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) * Uninstall the breakpoint contained in the given counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) * First we search the debug address register it uses and then we disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) * it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) * Atomic: we hold the counter->ctx->lock and we only handle variables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) * and registers local to this cpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) void arch_uninstall_hw_breakpoint(struct perf_event *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) struct arch_hw_breakpoint null_brk = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) struct perf_event **slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) slot = this_cpu_ptr(&bp_per_reg[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) if (*slot == bp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) *slot = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) __set_breakpoint(i, &null_brk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) static bool is_ptrace_bp(struct perf_event *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) return bp->overflow_handler == ptrace_triggered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
/*
 * Bookkeeping entry used by the slot-reservation code below to track
 * which events occupy breakpoint resources.
 */
struct breakpoint {
	struct list_head list;		/* link in the task_bps list */
	struct perf_event *bp;		/* the tracked perf event */
	bool ptrace_bp;			/* true if registered via ptrace_triggered */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)
/* Breakpoints bound to a single cpu, one entry per watchpoint slot. */
static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
/* Breakpoints bound to a task (ptrace or per-task perf events). */
static LIST_HEAD(task_bps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) struct breakpoint *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) tmp->bp = bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) tmp->ptrace_bp = is_ptrace_bp(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) return tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) static bool bp_addr_range_overlap(struct perf_event *bp1, struct perf_event *bp2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) __u64 bp1_saddr, bp1_eaddr, bp2_saddr, bp2_eaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) bp1_saddr = ALIGN_DOWN(bp1->attr.bp_addr, HW_BREAKPOINT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) bp1_eaddr = ALIGN(bp1->attr.bp_addr + bp1->attr.bp_len, HW_BREAKPOINT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) bp2_saddr = ALIGN_DOWN(bp2->attr.bp_addr, HW_BREAKPOINT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) bp2_eaddr = ALIGN(bp2->attr.bp_addr + bp2->attr.bp_len, HW_BREAKPOINT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) return (bp1_saddr < bp2_eaddr && bp1_eaddr > bp2_saddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) static bool alternate_infra_bp(struct breakpoint *b, struct perf_event *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) return is_ptrace_bp(bp) ? !b->ptrace_bp : b->ptrace_bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) static bool can_co_exist(struct breakpoint *b, struct perf_event *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) return !(alternate_infra_bp(b, bp) && bp_addr_range_overlap(b->bp, bp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) static int task_bps_add(struct perf_event *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) struct breakpoint *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) tmp = alloc_breakpoint(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) if (IS_ERR(tmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) return PTR_ERR(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) list_add(&tmp->list, &task_bps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) static void task_bps_remove(struct perf_event *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) struct list_head *pos, *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) list_for_each_safe(pos, q, &task_bps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) if (tmp->bp == bp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) list_del(&tmp->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) kfree(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) * If any task has breakpoint from alternate infrastructure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) * return true. Otherwise return false.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) static bool all_task_bps_check(struct perf_event *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) struct breakpoint *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) list_for_each_entry(tmp, &task_bps, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) if (!can_co_exist(tmp, bp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) * If same task has breakpoint from alternate infrastructure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) * return true. Otherwise return false.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) static bool same_task_bps_check(struct perf_event *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) struct breakpoint *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) list_for_each_entry(tmp, &task_bps, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) if (tmp->bp->hw.target == bp->hw.target &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) !can_co_exist(tmp, bp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) static int cpu_bps_add(struct perf_event *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) struct breakpoint **cpu_bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) struct breakpoint *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) tmp = alloc_breakpoint(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) if (IS_ERR(tmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) return PTR_ERR(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) if (!cpu_bp[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) cpu_bp[i] = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) static void cpu_bps_remove(struct perf_event *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) struct breakpoint **cpu_bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) if (!cpu_bp[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) if (cpu_bp[i]->bp == bp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) kfree(cpu_bp[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) cpu_bp[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) static bool cpu_bps_check(int cpu, struct perf_event *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) struct breakpoint **cpu_bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) cpu_bp = per_cpu_ptr(cpu_bps, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) static bool all_cpu_bps_check(struct perf_event *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) if (cpu_bps_check(cpu, bp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) * We don't use any locks to serialize accesses to cpu_bps or task_bps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) * because are already inside nr_bp_mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) */
int arch_reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	/* ptrace breakpoint */
	if (is_ptrace_bp(bp)) {
		/*
		 * A ptrace breakpoint may not overlap any cpu-bound perf
		 * breakpoint, nor a perf breakpoint on the same task.
		 */
		if (all_cpu_bps_check(bp))
			return -ENOSPC;

		if (same_task_bps_check(bp))
			return -ENOSPC;

		return task_bps_add(bp);
	}

	/* perf breakpoint */
	if (is_kernel_addr(bp->attr.bp_addr))
		return 0;

	if (bp->hw.target && bp->cpu == -1) {
		/* Task-bound event: only same-task conflicts matter. */
		if (same_task_bps_check(bp))
			return -ENOSPC;

		return task_bps_add(bp);
	} else if (!bp->hw.target && bp->cpu != -1) {
		/* Cpu-bound event: any task's breakpoint may conflict. */
		if (all_task_bps_check(bp))
			return -ENOSPC;

		return cpu_bps_add(bp);
	}

	/* Bound to both a task and a cpu: track in both structures. */
	if (same_task_bps_check(bp))
		return -ENOSPC;

	ret = cpu_bps_add(bp);
	if (ret)
		return ret;
	ret = task_bps_add(bp);
	if (ret)
		cpu_bps_remove(bp);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) void arch_release_bp_slot(struct perf_event *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) if (!is_kernel_addr(bp->attr.bp_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) if (bp->hw.target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) task_bps_remove(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) if (bp->cpu != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) cpu_bps_remove(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) * Perform cleanup of arch-specific counters during unregistration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) * of the perf-event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) void arch_unregister_hw_breakpoint(struct perf_event *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) * If the breakpoint is unregistered between a hw_breakpoint_handler()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) * and the single_step_dabr_instruction(), then cleanup the breakpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) * restoration variables to prevent dangling pointers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) if (bp->ctx->task->thread.last_hit_ubp[i] == bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) bp->ctx->task->thread.last_hit_ubp[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) * Check for virtual address in kernel space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	/* Non-zero when the watched address lies in kernel space. */
	return is_kernel_addr(hw->address);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) int arch_bp_generic_fields(int type, int *gen_bp_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) *gen_bp_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) if (type & HW_BRK_TYPE_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) *gen_bp_type |= HW_BREAKPOINT_R;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) if (type & HW_BRK_TYPE_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) *gen_bp_type |= HW_BREAKPOINT_W;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) if (*gen_bp_type == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) * Watchpoint match range is always doubleword(8 bytes) aligned on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) * powerpc. If the given range is crossing doubleword boundary, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) * need to increase the length such that next doubleword also get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) * covered. Ex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) * address len = 6 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) * |=========.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) * |------------v--|------v--------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) * | | | | | | | | | | | | | | | | |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) * |---------------|---------------|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) * <---8 bytes--->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) * In this case, we should configure hw as:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) * start_addr = address & ~(HW_BREAKPOINT_SIZE - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) * len = 16 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) * @start_addr is inclusive but @end_addr is exclusive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) */
static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
{
	u16 max_len = DABR_MAX_LEN;
	u16 hw_len;
	unsigned long start_addr, end_addr;

	/* Round the requested range out to doubleword granularity. */
	start_addr = ALIGN_DOWN(hw->address, HW_BREAKPOINT_SIZE);
	end_addr = ALIGN(hw->address + hw->len, HW_BREAKPOINT_SIZE);
	hw_len = end_addr - start_addr;

	if (dawr_enabled()) {
		max_len = DAWR_MAX_LEN;
		/* DAWR region can't cross 512 bytes boundary on p10 predecessors */
		if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
		    (ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512)))
			return -EINVAL;
	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
		/* 8xx can setup a range without limitation */
		max_len = U16_MAX;
	}

	/* Rounded length must fit the hardware's maximum match range. */
	if (hw_len > max_len)
		return -EINVAL;

	hw->hw_len = hw_len;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) * Validate the arch-specific HW Breakpoint register settings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	int ret = -EINVAL;

	/* A breakpoint with a zero-length watch range is meaningless. */
	if (!bp || !attr->bp_len)
		return ret;

	hw->type = HW_BRK_TYPE_TRANSLATE;
	if (attr->bp_type & HW_BREAKPOINT_R)
		hw->type |= HW_BRK_TYPE_READ;
	if (attr->bp_type & HW_BREAKPOINT_W)
		hw->type |= HW_BRK_TYPE_WRITE;
	if (hw->type == HW_BRK_TYPE_TRANSLATE)
		/* must set at least read or write */
		return ret;
	if (!attr->exclude_user)
		hw->type |= HW_BRK_TYPE_USER;
	if (!attr->exclude_kernel)
		hw->type |= HW_BRK_TYPE_KERNEL;
	if (!attr->exclude_hv)
		hw->type |= HW_BRK_TYPE_HYP;
	hw->address = attr->bp_addr;
	hw->len = attr->bp_len;

	if (!ppc_breakpoint_available())
		return -ENODEV;

	return hw_breakpoint_validate_len(hw);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) * Restores the breakpoint on the debug registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) * Invoke this function if it is known that the execution context is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) * about to change to cause loss of MSR_SE settings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) struct arch_hw_breakpoint *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) if (unlikely(tsk->thread.last_hit_ubp[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) goto reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) regs->msr &= ~MSR_SE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) __set_breakpoint(i, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) tsk->thread.last_hit_ubp[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) static bool is_larx_stcx_instr(int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) return type == LARX || type == STCX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) * We've failed in reliably handling the hw-breakpoint. Unregister
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) * it and throw a warning message to let the user know about it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) static void handler_error(struct perf_event *bp, struct arch_hw_breakpoint *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) WARN(1, "Unable to handle hardware breakpoint. Breakpoint at 0x%lx will be disabled.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) info->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) perf_event_disable_inatomic(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) static void larx_stcx_err(struct perf_event *bp, struct arch_hw_breakpoint *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) printk_ratelimited("Breakpoint hit on instruction that can't be emulated. Breakpoint at 0x%lx will be disabled.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) info->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) perf_event_disable_inatomic(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) struct arch_hw_breakpoint **info, int *hit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) struct ppc_inst instr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) int stepped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) /* Do not emulate user-space instructions, instead single-step them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) if (user_mode(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) if (!hit[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) current->thread.last_hit_ubp[i] = bp[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) info[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) regs->msr |= MSR_SE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) stepped = emulate_step(regs, instr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) if (!stepped) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) if (!hit[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) handler_error(bp[i], info[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) info[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) int hw_breakpoint_handler(struct die_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) bool err = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) int rc = NOTIFY_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) struct perf_event *bp[HBP_NUM_MAX] = { NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) struct pt_regs *regs = args->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) struct arch_hw_breakpoint *info[HBP_NUM_MAX] = { NULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) int hit[HBP_NUM_MAX] = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) int nr_hit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) bool ptrace_bp = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) struct ppc_inst instr = ppc_inst(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) int type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) int size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) unsigned long ea;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) /* Disable breakpoints during exception handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) hw_breakpoint_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) * The counter may be concurrently released but that can only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) * occur from a call_rcu() path. We can then safely fetch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) * the breakpoint, use its callback, touch its counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) * while we are in an rcu_read_lock() path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) if (!IS_ENABLED(CONFIG_PPC_8xx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) wp_get_instr_detail(regs, &instr, &type, &size, &ea);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) bp[i] = __this_cpu_read(bp_per_reg[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) if (!bp[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) info[i] = counter_arch_bp(bp[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) info[i]->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) if (wp_check_constraints(regs, instr, ea, type, size, info[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) if (!IS_ENABLED(CONFIG_PPC_8xx) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) ppc_inst_equal(instr, ppc_inst(0))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) handler_error(bp[i], info[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) info[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) err = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) if (is_ptrace_bp(bp[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) ptrace_bp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) hit[i] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) nr_hit++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) goto reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) if (!nr_hit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) rc = NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) * Return early after invoking user-callback function without restoring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) * DABR if the breakpoint is from ptrace which always operates in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) * generated in do_dabr().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) if (ptrace_bp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) if (!hit[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) perf_bp_event(bp[i], regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) info[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) rc = NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) goto reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) if (!IS_ENABLED(CONFIG_PPC_8xx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) if (is_larx_stcx_instr(type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) if (!hit[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) larx_stcx_err(bp[i], info[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) info[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) goto reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) if (!stepping_handler(regs, bp, info, hit, instr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) goto reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) * As a policy, the callback is invoked in a 'trigger-after-execute'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) * fashion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) if (!hit[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) if (!(info[i]->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) perf_bp_event(bp[i], regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) if (!info[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) __set_breakpoint(i, info[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) NOKPROBE_SYMBOL(hw_breakpoint_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) * Handle single-step exceptions following a DABR hit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) static int single_step_dabr_instruction(struct die_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) struct pt_regs *regs = args->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) struct perf_event *bp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) struct arch_hw_breakpoint *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * Check if we are single-stepping as a result of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * previous HW Breakpoint exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) bp = current->thread.last_hit_ubp[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) if (!bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) info = counter_arch_bp(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) * We shall invoke the user-defined callback function in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) * single stepping handler to confirm to 'trigger-after-execute'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) * semantics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) perf_bp_event(bp, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) current->thread.last_hit_ubp[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) if (!found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) bp = __this_cpu_read(bp_per_reg[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) if (!bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) info = counter_arch_bp(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) __set_breakpoint(i, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) * If the process was being single-stepped by ptrace, let the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) * other single-step actions occur (e.g. generate SIGTRAP).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) if (test_thread_flag(TIF_SINGLESTEP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) return NOTIFY_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) NOKPROBE_SYMBOL(single_step_dabr_instruction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * Handle debug exception notifications.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) int hw_breakpoint_exceptions_notify(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) struct notifier_block *unused, unsigned long val, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) int ret = NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) switch (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) case DIE_DABR_MATCH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) ret = hw_breakpoint_handler(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) case DIE_SSTEP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) ret = single_step_dabr_instruction(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) * Release the user breakpoints used by ptrace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) struct thread_struct *t = &tsk->thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) for (i = 0; i < nr_wp_slots(); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) unregister_hw_breakpoint(t->ptrace_bps[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) t->ptrace_bps[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) void hw_breakpoint_pmu_read(struct perf_event *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) /* TODO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) void ptrace_triggered(struct perf_event *bp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) struct perf_sample_data *data, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct perf_event_attr attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * Disable the breakpoint request here since ptrace has defined a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * one-shot behaviour for breakpoint exceptions in PPC64.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * The SIGTRAP signal is generated automatically for us in do_dabr().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * We don't have to do anything about that here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) attr = bp->attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) attr.disabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) modify_user_hw_breakpoint(bp, &attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }