// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/bug.h>

#include <linux/hw_breakpoint.h>
/*
 * Constraints data
 */
struct bp_cpuinfo {
	/* Number of pinned cpu breakpoints in a cpu */
	unsigned int	cpu_pinned;
	/* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
	unsigned int	*tsk_pinned;
	/* Number of non-pinned cpu/task breakpoints in a cpu */
	unsigned int	flexible; /* XXX: placeholder, see fetch_this_slot() */
};

static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
static int nr_slots[TYPE_MAX];

static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
{
	return per_cpu_ptr(bp_cpuinfo + type, cpu);
}

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;

/* Gather the total number of pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
	return 1;
}

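/*
 * Map a breakpoint type to the constraint table index it consumes:
 * data read/write breakpoints use the TYPE_DATA slots, instruction
 * breakpoints the TYPE_INST slots.
 */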
static inline enum bp_type_idx find_slot_idx(u64 bp_type)
{
	if (bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * can have in this cpu
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
	int i;

	for (i = nr_slots[type] - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}

/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
	struct task_struct *tsk = bp->hw.target;
	struct perf_event *iter;
	int count = 0;

	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
		if (iter->hw.target == tsk &&
		    find_slot_idx(iter->attr.bp_type) == type &&
		    (iter->cpu < 0 || cpu == iter->cpu))
			count += hw_breakpoint_weight(iter);
	}

	return count;
}

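/*
 * Return the cpumask a breakpoint is constrained to: a single cpu for
 * cpu-bound events, all possible cpus for task-bound ones.
 */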
static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
{
	if (bp->cpu >= 0)
		return cpumask_of(bp->cpu);
	return cpu_possible_mask;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	for_each_cpu(cpu, cpumask) {
		struct bp_cpuinfo *info = get_bp_info(cpu, type);
		int nr;

		nr = info->cpu_pinned;
		if (!bp->hw.target)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(cpu, bp, type);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = info->flexible;
		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * on the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
	slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
				enum bp_type_idx type, int weight)
{
	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
	int old_idx, new_idx;

	old_idx = task_bp_pinned(cpu, bp, type) - 1;
	new_idx = old_idx + weight;

	if (old_idx >= 0)
		tsk_pinned[old_idx]--;
	if (new_idx >= 0)
		tsk_pinned[new_idx]++;
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	       int weight)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	if (!enable)
		weight = -weight;

	/* Pinned counter cpu profiling */
	if (!bp->hw.target) {
		get_bp_info(bp->cpu, type)->cpu_pinned += weight;
		return;
	}

	/* Pinned counter task profiling */
	for_each_cpu(cpu, cpumask)
		toggle_bp_task_slot(bp, cpu, type, weight);

	if (enable)
		list_add_tail(&bp->hw.bp_list, &bp_task_head);
	else
		list_del(&bp->hw.bp_list);
}

__weak int arch_reserve_bp_slot(struct perf_event *bp)
{
	return 0;
}

__weak void arch_release_bp_slot(struct perf_event *bp)
{
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
 *           + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover all registers.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
 *           + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per-task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
 *            + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the info->flexible, if any, must keep
 *          at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
 *            + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
 */
static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;
	int weight;
	int ret;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp_type == HW_BREAKPOINT_EMPTY ||
	    bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp_type);
	weight = hw_breakpoint_weight(bp);

	fetch_bp_busy_slots(&slots, bp, type);
	/*
	 * Simulate the addition of this breakpoint to the constraints
	 * and see the result.
	 */
	fetch_this_slot(&slots, weight);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
		return -ENOSPC;

	ret = arch_reserve_bp_slot(bp);
	if (ret)
		return ret;

	toggle_bp_slot(bp, true, type, weight);

	return 0;
}

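/*
 * Locked wrapper around __reserve_bp_slot(), serialized by nr_bp_mutex.
 */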
int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp, bp->attr.bp_type);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}

static void __release_bp_slot(struct perf_event *bp, u64 bp_type)
{
	enum bp_type_idx type;
	int weight;

	arch_release_bp_slot(bp);

	type = find_slot_idx(bp_type);
	weight = hw_breakpoint_weight(bp);
	toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp, bp->attr.bp_type);

	mutex_unlock(&nr_bp_mutex);
}

static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
	int err;

	__release_bp_slot(bp, old_type);

	err = __reserve_bp_slot(bp, new_type);
	if (err) {
		/*
		 * Reserve the old_type slot back in case
		 * there's no space for the new type.
		 *
		 * This must succeed, because we just released
		 * the old_type slot in the __release_bp_slot
		 * call above. If not, something is broken.
		 */
		WARN_ON(__reserve_bp_slot(bp, old_type));
	}

	return err;
}

static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
	int ret;

	mutex_lock(&nr_bp_mutex);
	ret = __modify_bp_slot(bp, old_type, new_type);
	mutex_unlock(&nr_bp_mutex);
	return ret;
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp, bp->attr.bp_type);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp, bp->attr.bp_type);

	return 0;
}

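/*
 * Parse the user supplied attributes into the arch specific breakpoint
 * layout and apply the common permission checks for kernel-space
 * breakpoints.
 */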
static int hw_breakpoint_parse(struct perf_event *bp,
			       const struct perf_event_attr *attr,
			       struct arch_hw_breakpoint *hw)
{
	int err;

	err = hw_breakpoint_arch_parse(bp, attr, hw);
	if (err)
		return err;

	if (arch_check_bp_in_kernelspace(hw)) {
		if (attr->exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}

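/*
 * Reserve a constraint slot for the breakpoint, then parse and validate
 * its attributes into the arch specific fields.
 */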
int register_perf_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint hw = { };
	int err;

	err = reserve_bp_slot(bp);
	if (err)
		return err;

	err = hw_breakpoint_parse(bp, &bp->attr, &hw);
	if (err) {
		release_bp_slot(bp);
		return err;
	}

	bp->hw.info = hw;

	return 0;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data for the @triggered callback
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
						context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

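/*
 * Copy only the fields a breakpoint modification is allowed to change:
 * address, type, length and the disabled bit.
 */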
static void hw_breakpoint_copy_attr(struct perf_event_attr *to,
				    struct perf_event_attr *from)
{
	to->bp_addr = from->bp_addr;
	to->bp_type = from->bp_type;
	to->bp_len = from->bp_len;
	to->disabled = from->disabled;
}

int
modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
				bool check)
{
	struct arch_hw_breakpoint hw = { };
	int err;

	err = hw_breakpoint_parse(bp, attr, &hw);
	if (err)
		return err;

	if (check) {
		struct perf_event_attr old_attr;

		old_attr = bp->attr;
		hw_breakpoint_copy_attr(&old_attr, attr);
		if (memcmp(&old_attr, attr, sizeof(*attr)))
			return -EINVAL;
	}

	if (bp->attr.bp_type != attr->bp_type) {
		err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type);
		if (err)
			return err;
	}

	hw_breakpoint_copy_attr(&bp->attr, attr);
	bp->hw.info = hw;

	return 0;
}

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	int err;

	/*
	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
	 * will not be possible to raise IPIs that invoke __perf_event_disable.
	 * So call the function directly after making sure we are targeting the
	 * current task.
	 */
	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
		perf_event_disable_local(bp);
	else
		perf_event_disable(bp);

	err = modify_user_hw_breakpoint_check(bp, attr, false);

	if (!bp->attr.disabled)
		perf_event_enable(bp);

	return err;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data for the @triggered callback
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context)
{
	struct perf_event * __percpu *cpu_events, *bp;
	long err = 0;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		bp = perf_event_create_kernel_counter(attr, cpu, NULL,
						      triggered, context);
		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			break;
		}

		per_cpu(*cpu_events, cpu) = bp;
	}
	put_online_cpus();

	if (likely(!err))
		return cpu_events;

	unregister_wide_hw_breakpoint(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;

	for_each_possible_cpu(cpu)
		unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));

	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

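/*
 * Release the constraint slot when the perf event backing the
 * breakpoint is destroyed.
 */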
static void bp_perf_event_destroy(struct perf_event *event)
{
	release_bp_slot(event);
}

static int hw_breakpoint_event_init(struct perf_event *bp)
{
	int err;

	if (bp->attr.type != PERF_TYPE_BREAKPOINT)
		return -ENOENT;

	/*
	 * no branch sampling for breakpoint events
	 */
	if (has_branch_stack(bp))
		return -EOPNOTSUPP;

	err = register_perf_hw_breakpoint(bp);
	if (err)
		return err;

	bp->destroy = bp_perf_event_destroy;

	return 0;
}

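/*
 * pmu::add callback: set up the sampling period if needed and install
 * the breakpoint into the cpu's debug registers.
 */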
static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
	if (!(flags & PERF_EF_START))
		bp->hw.state = PERF_HES_STOPPED;

	if (is_sampling_event(bp)) {
		bp->hw.last_period = bp->hw.sample_period;
		perf_swevent_set_period(bp);
	}

	return arch_install_hw_breakpoint(bp);
}

static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
	arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
	bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
	bp->hw.state = PERF_HES_STOPPED;
}

static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,
};

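/*
 * Allocate the per-cpu constraint tables, then register the breakpoint
 * PMU and the debug exception notifier at boot.
 */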
int __init init_hw_breakpoint(void)
{
	int cpu, err_cpu;
	int i;

	for (i = 0; i < TYPE_MAX; i++)
		nr_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			struct bp_cpuinfo *info = get_bp_info(cpu, i);

			info->tsk_pinned = kcalloc(nr_slots[i], sizeof(int),
						   GFP_KERNEL);
			if (!info->tsk_pinned)
				goto err_alloc;
		}
	}

	constraints_initialized = 1;

	perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
	for_each_possible_cpu(err_cpu) {
		for (i = 0; i < TYPE_MAX; i++)
			kfree(get_bp_info(err_cpu, i)->tsk_pinned);
		if (err_cpu == cpu)
			break;
	}

	return -ENOMEM;
}