// SPDX-License-Identifier: GPL-2.0
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/security.h>
#include "trace.h"
#include "trace_probe.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;

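/*
 * Check whether the owner of @p_event may attach it to @tp_event. Any
 * per-event ->perf_perm() hook runs first; children of an already-checked
 * parent are allowed outright. The ftrace function event and raw sample
 * data (PERF_SAMPLE_RAW) are restricted to callers that pass
 * perf_allow_tracepoint(), since raw records can leak kernel data.
 */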
static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	if (tp_event->perf_perm) {
		ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * We already checked and allowed the parent to be created;
	 * allow children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's OK to check the current process's (owner's) permissions here,
	 * because the code below is reached only via the perf_event_open()
	 * syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		ret = perf_allow_tracepoint(&p_event->attr);
		if (ret)
			return ret;

		if (!is_sampling_event(p_event))
			return 0;

		/*
		 * We don't allow user space callchains for the function trace
		 * event, due to problems with page faults while tracing the
		 * page fault handler, and the overall trickiness of doing so.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * so only allow root to access it.
	 */
	ret = perf_allow_tracepoint(&p_event->attr);
	if (ret)
		return ret;

	return 0;
}

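/*
 * First-user registration for @tp_event: allocate the per-cpu hlist of
 * perf events and, if this is the first perf user of any trace event,
 * the per-context scratch buffers, then call the class ->reg() hook
 * with TRACE_REG_PERF_REGISTER. Later users only bump perf_refcount.
 * Called with event_mutex held.
 */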
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

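/*
 * Drop one perf reference on the trace event behind @p_event. The last
 * reference unregisters the class ->reg() hook, waits for in-flight
 * tracepoint callbacks, and frees the per-cpu hlist; the last reference
 * overall also frees the shared perf_trace_buf buffers. The module
 * reference taken at init time is always dropped.
 */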
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

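/*
 * Common init path for all perf trace event flavours: permission check,
 * registration, then the TRACE_REG_PERF_OPEN callback. Registration is
 * rolled back if the open step fails.
 */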
static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

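/*
 * Entry point from the perf core for regular trace events: look up the
 * trace event whose id matches attr.config, pin its module and
 * initialize the perf side of it.
 */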
int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

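/*
 * Counterpart of perf_trace_init(): close and unregister the event under
 * event_mutex.
 */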
void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

#ifdef CONFIG_KPROBE_EVENTS
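/*
 * Create a perf-local kprobe event from attr.kprobe_func/kprobe_addr and
 * attr.probe_offset, then run it through the common init path. The
 * symbol name is copied from user space and bounded by KSYM_NAME_LEN.
 */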
int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *func = NULL;
	struct trace_event_call *tp_event;

	if (p_event->attr.kprobe_func) {
		func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
		if (!func)
			return -ENOMEM;
		ret = strncpy_from_user(
			func, u64_to_user_ptr(p_event->attr.kprobe_func),
			KSYM_NAME_LEN);
		if (ret == KSYM_NAME_LEN)
			ret = -E2BIG;
		if (ret < 0)
			goto out;

		if (func[0] == '\0') {
			kfree(func);
			func = NULL;
		}
	}

	tp_event = create_local_trace_kprobe(
		func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
		p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_kprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(func);
	return ret;
}

void perf_kprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);

	destroy_local_trace_kprobe(p_event->tp_event);
}
#endif /* CONFIG_KPROBE_EVENTS */

#ifdef CONFIG_UPROBE_EVENTS
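/*
 * Create a perf-local uprobe event from the user-supplied binary path,
 * attr.probe_offset and reference counter offset, then run it through
 * the common init path.
 */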
int perf_uprobe_init(struct perf_event *p_event,
		     unsigned long ref_ctr_offset, bool is_retprobe)
{
	int ret;
	char *path = NULL;
	struct trace_event_call *tp_event;

	if (!p_event->attr.uprobe_path)
		return -EINVAL;

	path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
			    PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		return (ret == -EINVAL) ? -E2BIG : ret;
	}
	if (path[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	tp_event = create_local_trace_uprobe(path, p_event->attr.probe_offset,
					     ref_ctr_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	/*
	 * A local trace_uprobe needs to hold event_mutex to call
	 * uprobe_buffer_enable() and uprobe_buffer_disable().
	 * event_mutex is not required for local trace_kprobes.
	 */
	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_uprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(path);
	return ret;
}

void perf_uprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
	destroy_local_trace_uprobe(p_event->tp_event);
}
#endif /* CONFIG_UPROBE_EVENTS */

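/*
 * pmu ->add() callback for trace events: mark the event stopped unless
 * PERF_EF_START is set and, unless the class ->reg() hook handled
 * TRACE_REG_PERF_ADD itself, queue the event on this CPU's hlist.
 */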
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	/*
	 * If TRACE_REG_PERF_ADD returns false, no custom action was performed
	 * and we need to take the default action of enqueueing our event on
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
		struct hlist_head __percpu *pcpu_list;
		struct hlist_head *list;

		pcpu_list = tp_event->perf_events;
		if (WARN_ON_ONCE(!pcpu_list))
			return -EINVAL;

		list = this_cpu_ptr(pcpu_list);
		hlist_add_head_rcu(&p_event->hlist_entry, list);
	}

	return 0;
}

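/*
 * pmu ->del() callback: unless the class ->reg() hook handled
 * TRACE_REG_PERF_DEL itself, unhook the event from its per-cpu hlist.
 */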
void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	/*
	 * If TRACE_REG_PERF_DEL returns false, no custom action was performed
	 * and we need to take the default action of dequeueing our event from
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
		hlist_del_rcu(&p_event->hlist_entry);
}

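/*
 * Grab a per-cpu, per-recursion-context scratch buffer for building a
 * perf trace record of @size bytes (at most PERF_MAX_TRACE_SIZE).
 * On success, *rctxp holds the recursion context that the caller later
 * releases (e.g. via perf_trace_buf_submit()).
 */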
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
	char *raw_data;
	int rctx;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	*rctxp = rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[rctx]);
	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

	/* Zero the dead bytes left by alignment so we don't leak stack to user space. */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);

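/*
 * Fill in the common trace_entry header (type, irq flags, preempt count)
 * of an already-allocated perf trace record.
 */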
void perf_trace_buf_update(void *record, u16 type)
{
	struct trace_entry *entry = record;
	int pc = preempt_count();
	unsigned long flags;

	local_save_flags(flags);
	tracing_generic_entry_update(entry, type, flags, pc);
}
NOKPROBE_SYMBOL(perf_trace_buf_update);

#ifdef CONFIG_FUNCTION_TRACER
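/*
 * ftrace callback for perf function events. Only the CPU recorded in
 * ops->private (set by TRACE_REG_PERF_ADD) emits a sample; the entry is
 * built in the perf trace buffer and submitted with the caller's regs.
 */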
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct perf_event *event;
	struct hlist_head head;
	struct pt_regs regs;
	int rctx;

	if ((unsigned long)ops->private != smp_processor_id())
		return;

	event = container_of(ops, struct perf_event, ftrace_ops);

	/*
	 * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
	 * the perf code does is hlist_for_each_entry_rcu(), so we can
	 * get away with simply setting the @head.first pointer in order
	 * to create a singular list.
	 */
	head.first = &event->hlist_entry;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	memset(&regs, 0, sizeof(regs));
	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
			      1, &regs, &head, NULL);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags = FTRACE_OPS_FL_RCU;
	ops->func = perf_ftrace_function_call;
	ops->private = (void *)(unsigned long)nr_cpu_ids;

	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

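/*
 * ->reg() implementation for the ftrace function trace event. OPEN/CLOSE
 * (un)register the per-event ftrace_ops; ADD/DEL bind the ops to the
 * current CPU (or to the invalid nr_cpu_ids) and return 1 so that the
 * generic hlist handling in perf_trace_add()/del() is skipped.
 */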
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	struct perf_event *event = data;

	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
		return 1;
	case TRACE_REG_PERF_DEL:
		event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
		return 1;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */