// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

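/*
 * Look up a raw tracepoint exported by a loaded module by name. On success a
 * reference on the owning module is taken via try_module_get() so the
 * tracepoint cannot go away while a BPF program is attached to it.
 */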
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * the kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If bpf_prog_array_valid() fetched a non-NULL prog_array, we enter
	 * trace_call_bpf() and do the actual proper rcu_dereference() under
	 * the RCU lock.
	 * If it turns out that prog_array is NULL, we bail out.
	 * Conversely, if the pointer bpf_prog_array_valid() fetched was NULL,
	 * the prog_array is skipped, at the risk of missing out on events
	 * that were added in between that check and the rcu_dereference(),
	 * which is an accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif

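/*
 * The probe-read helpers below zero the destination buffer whenever the copy
 * fails, so BPF programs that ignore the return value never see
 * uninitialized memory.
 */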
static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If user takes `dst`
	 * and keys a hash map with it, then semantically identical strings can
	 * occupy multiple entries in the map.
	 */
	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func		= bpf_probe_read_user_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func		= bpf_probe_read_kernel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in the error case, so that improper users ignoring the return
	 * code altogether don't copy garbage; otherwise the length of the
	 * string is returned, which can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func		= bpf_probe_read_kernel_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

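/*
 * Legacy bpf_probe_read()/bpf_probe_read_str() compatibility helpers: on
 * architectures with non-overlapping user and kernel address spaces, guess
 * whether the pointer is a user or a kernel address by comparing it against
 * TASK_SIZE and dispatch to the matching helper above.
 */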
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func		= bpf_probe_read_compat,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func		= bpf_probe_read_compat_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

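/*
 * The bpf_probe_write_user() proto is only handed out to CAP_SYS_ADMIN
 * callers, and a ratelimited warning is logged because the helper can
 * corrupt user memory.
 */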
static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

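/*
 * Copy the string argument referenced by a %s/%pks/%pus specifier into @buf.
 * @fmt_ptype is 's' for a plain %s (treated as a user pointer when the
 * architecture has non-overlapping address spaces and the address is below
 * TASK_SIZE, kernel otherwise), 'k' for %pks (kernel string) and 'u' for
 * %pus (user string).
 */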
static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
		size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE) {
			strncpy_from_user_nofault(buf, user_ptr, bufsz);
			break;
		}
		fallthrough;
#endif
	case 'k':
		strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
		break;
	case 'u':
		strncpy_from_user_nofault(buf, user_ptr, bufsz);
		break;
	}
}

static DEFINE_RAW_SPINLOCK(trace_printk_lock);

#define BPF_TRACE_PRINTK_SIZE	1024

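/*
 * All bpf_trace_printk() output is formatted into a single static buffer,
 * serialized by trace_printk_lock, and emitted through the
 * bpf_trace/bpf_trace_printk trace event.
 */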
static __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
{
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	va_list ap;
	int ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	va_start(ap, fmt);
	ret = vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);
	/* vsnprintf() will not append null for zero-length strings */
	if (ret == 0)
		buf[0] = '\0';
	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	return ret;
}

/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
 */
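/*
 * Rough usage sketch from a BPF program's point of view (helper signature as
 * in the UAPI; the format string must live on the BPF program stack):
 *
 *	char fmt[] = "pid %d comm %s\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, comm);
 *
 * At most three arguments and a single %s per format string are accepted;
 * the output is emitted via the bpf_trace_printk trace event and is visible
 * e.g. through trace_pipe.
 */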
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	int i, mod[3] = {}, fmt_cnt = 0;
	char buf[64], fmt_ptype;
	void *unsafe_ptr = NULL;
	bool str_seen = false;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p') {
			mod[fmt_cnt]++;
			if ((fmt[i + 1] == 'k' ||
			     fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 'B') {
				i++;
				goto fmt_next;
			}

			/* disallow any further format extensions */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			goto fmt_next;
		} else if (fmt[i] == 's') {
			mod[fmt_cnt]++;
			fmt_ptype = fmt[i];
fmt_str:
			if (str_seen)
				/* allow only one '%s' per fmt string */
				return -EINVAL;
			str_seen = true;

			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			switch (fmt_cnt) {
			case 0:
				unsafe_ptr = (void *)(long)arg1;
				arg1 = (long)buf;
				break;
			case 1:
				unsafe_ptr = (void *)(long)arg2;
				arg2 = (long)buf;
				break;
			case 2:
				unsafe_ptr = (void *)(long)arg3;
				arg3 = (long)buf;
				break;
			}

			bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype,
					sizeof(buf));
			goto fmt_next;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
fmt_next:
		fmt_cnt++;
	}

/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	bpf_do_trace_printk(fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible a user has
	 * disabled bpf_trace_printk events. By loading a program that
	 * calls bpf_trace_printk(), however, the user has expressed
	 * the intent to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");

	return &bpf_trace_printk_proto;
}

#define MAX_SEQ_PRINTF_VARARGS		12
#define MAX_SEQ_PRINTF_MAX_MEMCPY	6
#define MAX_SEQ_PRINTF_STR_LEN		128

struct bpf_seq_printf_buf {
	char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN];
};
static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf);
static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used);

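/*
 * bpf_seq_printf() formats string and binary arguments into the per-CPU
 * buffers above; bpf_seq_printf_buf_used acts as a recursion guard so a
 * nested invocation on the same CPU bails out with -EBUSY instead of
 * clobbering the buffers.
 */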
BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, data, u32, data_len)
{
	int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0;
	int i, buf_used, copy_size, num_args;
	u64 params[MAX_SEQ_PRINTF_VARARGS];
	struct bpf_seq_printf_buf *bufs;
	const u64 *args = data;

	buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used);
	if (WARN_ON_ONCE(buf_used > 1)) {
		err = -EBUSY;
		goto out;
	}

	bufs = this_cpu_ptr(&bpf_seq_printf_buf);

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		goto out;

	if (data_len & 7)
		goto out;

	for (i = 0; i < fmt_size; i++) {
		if (fmt[i] == '%') {
			if (fmt[i + 1] == '%')
				i++;
			else if (!data || !data_len)
				goto out;
		}
	}

	num_args = data_len / 8;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		/* only printable ascii for now. */
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) {
			err = -E2BIG;
			goto out;
		}

		if (fmt_cnt >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 's') {
			void *unsafe_ptr;

			/* try our best to copy */
			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
				err = -E2BIG;
				goto out;
			}

			unsafe_ptr = (void *)(long)args[fmt_cnt];
			err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt],
					unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN);
			if (err < 0)
				bufs->buf[memcpy_cnt][0] = '\0';
			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];

			fmt_cnt++;
			memcpy_cnt++;
			continue;
		}

		if (fmt[i] == 'p') {
			if (fmt[i + 1] == 0 ||
			    fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' ||
			    fmt[i + 1] == 'B') {
				/* just kernel pointers */
				params[fmt_cnt] = args[fmt_cnt];
				fmt_cnt++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') {
				err = -EINVAL;
				goto out;
			}
			if (fmt[i + 2] != '4' && fmt[i + 2] != '6') {
				err = -EINVAL;
				goto out;
			}

			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
				err = -E2BIG;
				goto out;
			}

			copy_size = (fmt[i + 2] == '4') ? 4 : 16;

			err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt],
						(void *) (long) args[fmt_cnt],
						copy_size);
			if (err < 0)
				memset(bufs->buf[memcpy_cnt], 0, copy_size);
			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];

			i += 2;
			fmt_cnt++;
			memcpy_cnt++;
			continue;
		}

		if (fmt[i] == 'l') {
			i++;
			if (fmt[i] == 'l')
				i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x' &&
		    fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		params[fmt_cnt] = args[fmt_cnt];
		fmt_cnt++;
	}

	/* We can have at most MAX_SEQ_PRINTF_VARARGS parameters; just give
	 * all of them to seq_printf().
	 */
	seq_printf(m, fmt, params[0], params[1], params[2], params[3],
		   params[4], params[5], params[6], params[7], params[8],
		   params[9], params[10], params[11]);

	err = seq_has_overflowed(m) ? -EOVERFLOW : 0;
out:
	this_cpu_dec(bpf_seq_printf_buf_used);
	return err;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func		= bpf_seq_printf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

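/* Write a raw blob of @len bytes into the seq_file; -EOVERFLOW if it is full. */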
BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func		= bpf_seq_write,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func		= bpf_seq_printf_btf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

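/*
 * Look up the perf event stored in a BPF_MAP_TYPE_PERF_EVENT_ARRAY at the
 * index given in the low bits of @flags (or the current CPU when the index
 * is BPF_F_CURRENT_CPU) and read its counter value locally.
 */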
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * This API is ugly since we lose the [-22..-2] range of valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * counter values to error codes, but that's UAPI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) static const struct bpf_func_proto bpf_perf_event_read_proto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) .func = bpf_perf_event_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) .gpl_only = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) .ret_type = RET_INTEGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) .arg1_type = ARG_CONST_MAP_PTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) .arg2_type = ARG_ANYTHING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
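/*
 * Like bpf_perf_event_read(), but also reports the enabled and running
 * times in a struct bpf_perf_event_value, which avoids the error/value
 * ambiguity of the older helper. On failure the output buffer is zeroed
 * so the program never observes stale data.
 */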
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) struct bpf_perf_event_value *, buf, u32, size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (unlikely(size != sizeof(struct bpf_perf_event_value)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) goto clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) &buf->running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) goto clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) clear:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) memset(buf, 0, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) .func = bpf_perf_event_read_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) .gpl_only = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) .ret_type = RET_INTEGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) .arg1_type = ARG_CONST_MAP_PTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) .arg2_type = ARG_ANYTHING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) .arg3_type = ARG_PTR_TO_UNINIT_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) .arg4_type = ARG_CONST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
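/*
 * Common output path shared by the perf_event_output variants below.
 * The map entry must be a PERF_COUNT_SW_BPF_OUTPUT software event bound
 * to the current CPU, otherwise the sample is rejected.
 */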
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) static __always_inline u64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) u64 flags, struct perf_sample_data *sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) struct bpf_array *array = container_of(map, struct bpf_array, map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) unsigned int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) u64 index = flags & BPF_F_INDEX_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) struct bpf_event_entry *ee;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) struct perf_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (index == BPF_F_CURRENT_CPU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) index = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (unlikely(index >= array->map.max_entries))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) ee = READ_ONCE(array->ptrs[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (!ee)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) event = ee->event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (unlikely(event->oncpu != cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return perf_event_output(event, sd, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * Support executing tracepoints in normal, irq, and nmi contexts, each of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * which may call bpf_perf_event_output().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) struct bpf_trace_sample_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct perf_sample_data sds[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) static DEFINE_PER_CPU(int, bpf_trace_nest_level);
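/*
 * Recursion guard: each CPU keeps one perf_sample_data slot per context
 * level (task, irq, nmi). If the nest level ever exceeds the three slots,
 * the call bails out with -EBUSY instead of corrupting an in-flight sample.
 *
 * Illustrative BPF-side usage (a sketch, not part of this file), assuming
 * an "events" BPF_MAP_TYPE_PERF_EVENT_ARRAY map:
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &data, sizeof(data));
 */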
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) u64, flags, void *, data, u64, size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) struct perf_raw_record raw = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) .frag = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) .size = size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) .data = data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) struct perf_sample_data *sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) sd = &sds->sds[nest_level - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) perf_sample_data_init(sd, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) sd->raw = &raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) err = __bpf_perf_event_output(regs, map, flags, sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) this_cpu_dec(bpf_trace_nest_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) static const struct bpf_func_proto bpf_perf_event_output_proto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) .func = bpf_perf_event_output,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) .gpl_only = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) .ret_type = RET_INTEGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) .arg1_type = ARG_PTR_TO_CTX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) .arg2_type = ARG_CONST_MAP_PTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) .arg3_type = ARG_ANYTHING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) .arg4_type = ARG_PTR_TO_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) .arg5_type = ARG_CONST_SIZE_OR_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) struct bpf_nested_pt_regs {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) struct pt_regs regs[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
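/*
 * Kernel-internal variant used when the sample payload consists of a meta
 * buffer plus extra context data copied via @ctx_copy (e.g. packet bytes).
 * The two are chained as perf_raw_frag entries, pt_regs are synthesized
 * with perf_fetch_caller_regs(), and a separate per-CPU nesting guard is
 * used so it does not collide with bpf_perf_event_output() above.
 */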
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) struct perf_raw_frag frag = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) .copy = ctx_copy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) .size = ctx_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) .data = ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct perf_raw_record raw = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) .frag = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) .next = ctx_size ? &frag : NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) .size = meta_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) .data = meta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) struct perf_sample_data *sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) struct pt_regs *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) u64 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) perf_fetch_caller_regs(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) perf_sample_data_init(sd, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) sd->raw = &raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) ret = __bpf_perf_event_output(regs, map, flags, sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) this_cpu_dec(bpf_event_output_nest_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) BPF_CALL_0(bpf_get_current_task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) return (long) current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) const struct bpf_func_proto bpf_get_current_task_proto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) .func = bpf_get_current_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) .gpl_only = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) .ret_type = RET_INTEGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
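/*
 * Test whether current is a descendant of (or equal to) the cgroup stored
 * at index @idx of a cgroup array map. Returns 1 if so, 0 if not, or a
 * negative error if the slot is out of range or empty.
 */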
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) struct bpf_array *array = container_of(map, struct bpf_array, map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) struct cgroup *cgrp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (unlikely(idx >= array->map.max_entries))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) cgrp = READ_ONCE(array->ptrs[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (unlikely(!cgrp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) return task_under_cgroup_hierarchy(current, cgrp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) .func = bpf_current_task_under_cgroup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) .gpl_only = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) .ret_type = RET_INTEGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) .arg1_type = ARG_CONST_MAP_PTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) .arg2_type = ARG_ANYTHING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
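/*
 * Sending a signal may need to take locks that cannot be acquired with
 * IRQs disabled, so bpf_send_signal_common() defers the actual
 * group_send_sig_info() call to this per-CPU irq_work in that case.
 */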
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) struct send_signal_irq_work {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct irq_work irq_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) struct task_struct *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) u32 sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) enum pid_type type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) static void do_bpf_send_signal(struct irq_work *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) struct send_signal_irq_work *work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) work = container_of(entry, struct send_signal_irq_work, irq_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) static int bpf_send_signal_common(u32 sig, enum pid_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) struct send_signal_irq_work *work = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) /* Similar to bpf_probe_write_user, the task needs to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * in a sound state and kernel memory access must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * permitted in order to send a signal to the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (unlikely(uaccess_kernel()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (unlikely(!nmi_uaccess_okay()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (irqs_disabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) /* Do an early check on signal validity. Otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * the error is lost in deferred irq_work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (unlikely(!valid_signal(sig)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) work = this_cpu_ptr(&send_signal_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /* Add the current task, which is the target of the signal,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * to the irq_work. The current task may have changed by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * time the queued irq_work is executed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) work->task = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) work->sig = sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) work->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) irq_work_queue(&work->irq_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) BPF_CALL_1(bpf_send_signal, u32, sig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return bpf_send_signal_common(sig, PIDTYPE_TGID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static const struct bpf_func_proto bpf_send_signal_proto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) .func = bpf_send_signal,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) .gpl_only = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) .ret_type = RET_INTEGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) .arg1_type = ARG_ANYTHING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) BPF_CALL_1(bpf_send_signal_thread, u32, sig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return bpf_send_signal_common(sig, PIDTYPE_PID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) static const struct bpf_func_proto bpf_send_signal_thread_proto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) .func = bpf_send_signal_thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) .gpl_only = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) .ret_type = RET_INTEGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) .arg1_type = ARG_ANYTHING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
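/*
 * Resolve a struct path into @buf using d_path(). On success the string is
 * moved to the start of the buffer and the returned length includes the
 * terminating NUL; on failure the d_path() error is returned.
 */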
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) long len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) if (!sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) p = d_path(path, buf, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (IS_ERR(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) len = PTR_ERR(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) len = buf + sz - p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) memmove(buf, p, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
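/*
 * bpf_d_path() is only allowed for programs whose BTF attach point is one
 * of the functions below, i.e. places where a struct path argument is
 * known to be safe to dereference.
 */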
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) BTF_SET_START(btf_allowlist_d_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) #ifdef CONFIG_SECURITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) BTF_ID(func, security_file_permission)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) BTF_ID(func, security_inode_getattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) BTF_ID(func, security_file_open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) #ifdef CONFIG_SECURITY_PATH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) BTF_ID(func, security_path_truncate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) BTF_ID(func, vfs_truncate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) BTF_ID(func, vfs_fallocate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) BTF_ID(func, dentry_open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) BTF_ID(func, vfs_getattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) BTF_ID(func, filp_close)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) BTF_SET_END(btf_allowlist_d_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) static bool bpf_d_path_allowed(const struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) return btf_id_set_contains(&btf_allowlist_d_path, prog->aux->attach_btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static const struct bpf_func_proto bpf_d_path_proto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) .func = bpf_d_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) .gpl_only = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) .ret_type = RET_INTEGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) .arg1_type = ARG_PTR_TO_BTF_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) .arg1_btf_id = &bpf_d_path_btf_ids[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) .arg2_type = ARG_PTR_TO_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) .arg3_type = ARG_CONST_SIZE_OR_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) .allowed = bpf_d_path_allowed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) #define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) BTF_F_PTR_RAW | BTF_F_ZERO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
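/*
 * Shared argument validation for bpf_seq_printf_btf() and
 * bpf_snprintf_btf(): check flags and the btf_ptr size, resolve vmlinux
 * BTF, and make sure ptr->type_id refers to an existing type.
 */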
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) u64 flags, const struct btf **btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) s32 *btf_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) const struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (unlikely(flags & ~(BTF_F_ALL)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (btf_ptr_size != sizeof(struct btf_ptr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) *btf = bpf_get_btf_vmlinux();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (IS_ERR_OR_NULL(*btf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (ptr->type_id > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) *btf_id = ptr->type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (*btf_id > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) t = btf_type_by_id(*btf, *btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (*btf_id <= 0 || !t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) u32, btf_ptr_size, u64, flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) const struct btf *btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) s32 btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) const struct bpf_func_proto bpf_snprintf_btf_proto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) .func = bpf_snprintf_btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) .gpl_only = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) .ret_type = RET_INTEGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) .arg1_type = ARG_PTR_TO_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) .arg2_type = ARG_CONST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) .arg3_type = ARG_PTR_TO_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) .arg4_type = ARG_CONST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) .arg5_type = ARG_ANYTHING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) const struct bpf_func_proto *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) switch (func_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) case BPF_FUNC_map_lookup_elem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) return &bpf_map_lookup_elem_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) case BPF_FUNC_map_update_elem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) return &bpf_map_update_elem_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) case BPF_FUNC_map_delete_elem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) return &bpf_map_delete_elem_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) case BPF_FUNC_map_push_elem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) return &bpf_map_push_elem_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) case BPF_FUNC_map_pop_elem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) return &bpf_map_pop_elem_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) case BPF_FUNC_map_peek_elem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) return &bpf_map_peek_elem_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) case BPF_FUNC_ktime_get_ns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) return &bpf_ktime_get_ns_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) case BPF_FUNC_ktime_get_boot_ns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) return &bpf_ktime_get_boot_ns_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) case BPF_FUNC_tail_call:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) return &bpf_tail_call_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) case BPF_FUNC_get_current_pid_tgid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return &bpf_get_current_pid_tgid_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) case BPF_FUNC_get_current_task:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) return &bpf_get_current_task_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) case BPF_FUNC_get_current_uid_gid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return &bpf_get_current_uid_gid_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) case BPF_FUNC_get_current_comm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return &bpf_get_current_comm_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) case BPF_FUNC_trace_printk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return bpf_get_trace_printk_proto();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) case BPF_FUNC_get_smp_processor_id:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) return &bpf_get_smp_processor_id_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) case BPF_FUNC_get_numa_node_id:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) return &bpf_get_numa_node_id_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) case BPF_FUNC_perf_event_read:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) return &bpf_perf_event_read_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) case BPF_FUNC_current_task_under_cgroup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) return &bpf_current_task_under_cgroup_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) case BPF_FUNC_get_prandom_u32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) return &bpf_get_prandom_u32_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) case BPF_FUNC_probe_write_user:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) NULL : bpf_get_probe_write_proto();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) case BPF_FUNC_probe_read_user:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) return &bpf_probe_read_user_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) case BPF_FUNC_probe_read_kernel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) NULL : &bpf_probe_read_kernel_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) case BPF_FUNC_probe_read_user_str:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) return &bpf_probe_read_user_str_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) case BPF_FUNC_probe_read_kernel_str:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) NULL : &bpf_probe_read_kernel_str_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) case BPF_FUNC_probe_read:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) NULL : &bpf_probe_read_compat_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) case BPF_FUNC_probe_read_str:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) NULL : &bpf_probe_read_compat_str_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) #ifdef CONFIG_CGROUPS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) case BPF_FUNC_get_current_cgroup_id:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return &bpf_get_current_cgroup_id_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) case BPF_FUNC_send_signal:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) return &bpf_send_signal_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) case BPF_FUNC_send_signal_thread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) return &bpf_send_signal_thread_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) case BPF_FUNC_perf_event_read_value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) return &bpf_perf_event_read_value_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) case BPF_FUNC_get_ns_current_pid_tgid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return &bpf_get_ns_current_pid_tgid_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) case BPF_FUNC_ringbuf_output:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) return &bpf_ringbuf_output_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) case BPF_FUNC_ringbuf_reserve:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) return &bpf_ringbuf_reserve_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) case BPF_FUNC_ringbuf_submit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) return &bpf_ringbuf_submit_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) case BPF_FUNC_ringbuf_discard:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) return &bpf_ringbuf_discard_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) case BPF_FUNC_ringbuf_query:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) return &bpf_ringbuf_query_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) case BPF_FUNC_jiffies64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) return &bpf_jiffies64_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) case BPF_FUNC_get_task_stack:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) return &bpf_get_task_stack_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) case BPF_FUNC_copy_from_user:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) case BPF_FUNC_snprintf_btf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) return &bpf_snprintf_btf_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) case BPF_FUNC_per_cpu_ptr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return &bpf_per_cpu_ptr_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) case BPF_FUNC_this_cpu_ptr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) return &bpf_this_cpu_ptr_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
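/*
 * The per-program-type callbacks below first handle the helpers that are
 * specific to their context and then fall back to the common tracing set
 * in bpf_tracing_func_proto().
 */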
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) static const struct bpf_func_proto *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) switch (func_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) case BPF_FUNC_perf_event_output:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) return &bpf_perf_event_output_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) case BPF_FUNC_get_stackid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return &bpf_get_stackid_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) case BPF_FUNC_get_stack:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) return &bpf_get_stack_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) #ifdef CONFIG_BPF_KPROBE_OVERRIDE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) case BPF_FUNC_override_return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) return &bpf_override_return_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return bpf_tracing_func_proto(func_id, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) /* bpf+kprobe programs can access fields of 'struct pt_regs' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) const struct bpf_prog *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) struct bpf_insn_access_aux *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) if (off < 0 || off >= sizeof(struct pt_regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (type != BPF_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (off % size != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) * Assertion for 32-bit archs to make sure an 8-byte access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) * (BPF_DW) to the last 4-byte member is disallowed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (off + size > sizeof(struct pt_regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) const struct bpf_verifier_ops kprobe_verifier_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) .get_func_proto = kprobe_prog_func_proto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) .is_valid_access = kprobe_prog_is_valid_access,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) const struct bpf_prog_ops kprobe_prog_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) u64, flags, void *, data, u64, size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) struct pt_regs *regs = *(struct pt_regs **)tp_buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * r1 points to the perf tracepoint buffer whose first 8 bytes are hidden
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * from the bpf program and contain a pointer to 'struct pt_regs'. Fetch it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * from there and call the same bpf_perf_event_output() helper inline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) return ____bpf_perf_event_output(regs, map, flags, data, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) .func = bpf_perf_event_output_tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) .gpl_only = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) .ret_type = RET_INTEGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) .arg1_type = ARG_PTR_TO_CTX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) .arg2_type = ARG_CONST_MAP_PTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) .arg3_type = ARG_ANYTHING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) .arg4_type = ARG_PTR_TO_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) .arg5_type = ARG_CONST_SIZE_OR_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) u64, flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) struct pt_regs *regs = *(struct pt_regs **)tp_buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) * Same comment as in bpf_perf_event_output_tp(), only that this time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) * the other helper's function body cannot be inlined since it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) * external, so we need to call the raw helper function directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) flags, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) .func = bpf_get_stackid_tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) .gpl_only = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) .ret_type = RET_INTEGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) .arg1_type = ARG_PTR_TO_CTX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) .arg2_type = ARG_CONST_MAP_PTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) .arg3_type = ARG_ANYTHING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) u64, flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) struct pt_regs *regs = *(struct pt_regs **)tp_buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) (unsigned long) size, flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) static const struct bpf_func_proto bpf_get_stack_proto_tp = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) .func = bpf_get_stack_tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) .gpl_only = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) .ret_type = RET_INTEGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) .arg1_type = ARG_PTR_TO_CTX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) .arg2_type = ARG_PTR_TO_UNINIT_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) .arg3_type = ARG_CONST_SIZE_OR_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) .arg4_type = ARG_ANYTHING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) static const struct bpf_func_proto *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) switch (func_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) case BPF_FUNC_perf_event_output:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) return &bpf_perf_event_output_proto_tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) case BPF_FUNC_get_stackid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) return &bpf_get_stackid_proto_tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) case BPF_FUNC_get_stack:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) return &bpf_get_stack_proto_tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) return bpf_tracing_func_proto(func_id, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
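/*
 * Tracepoint programs read the perf tracepoint buffer directly. The first
 * sizeof(void *) bytes are hidden because they hold the 'struct pt_regs'
 * pointer used by the *_tp helpers above, hence the lower bound below.
 */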
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) const struct bpf_prog *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) struct bpf_insn_access_aux *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (type != BPF_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) if (off % size != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) const struct bpf_verifier_ops tracepoint_verifier_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) .get_func_proto = tp_prog_func_proto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) .is_valid_access = tp_prog_is_valid_access,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) const struct bpf_prog_ops tracepoint_prog_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
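/*
 * Read the counter, enabled, and running times of the perf event that
 * triggered this program. As with bpf_perf_event_read_value(), the output
 * buffer is zeroed on any error.
 */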
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) struct bpf_perf_event_value *, buf, u32, size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) if (unlikely(size != sizeof(struct bpf_perf_event_value)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) goto clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) &buf->running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) goto clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) clear:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) memset(buf, 0, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) .func = bpf_perf_prog_read_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) .gpl_only = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) .ret_type = RET_INTEGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) .arg1_type = ARG_PTR_TO_CTX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) .arg2_type = ARG_PTR_TO_UNINIT_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) .arg3_type = ARG_CONST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
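/*
 * Copy the branch records captured by the perf event (an array of
 * struct perf_branch_entry) into @buf, or, with
 * BPF_F_GET_BRANCH_RECORDS_SIZE, return the number of bytes needed to
 * hold them. Returns the number of bytes copied on success.
 */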
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) void *, buf, u32, size, u64, flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) static const u32 br_entry_size = sizeof(struct perf_branch_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) struct perf_branch_stack *br_stack = ctx->data->br_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) u32 to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (unlikely(!br_stack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) return br_stack->nr * br_entry_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) if (!buf || (size % br_entry_size != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) memcpy(buf, br_stack->entries, to_copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) return to_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) static const struct bpf_func_proto bpf_read_branch_records_proto = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) .func = bpf_read_branch_records,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) .gpl_only = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) .ret_type = RET_INTEGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) .arg1_type = ARG_PTR_TO_CTX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) .arg2_type = ARG_PTR_TO_MEM_OR_NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) .arg3_type = ARG_CONST_SIZE_OR_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) .arg4_type = ARG_ANYTHING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) static const struct bpf_func_proto *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) switch (func_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) case BPF_FUNC_perf_event_output:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) return &bpf_perf_event_output_proto_tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) case BPF_FUNC_get_stackid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) return &bpf_get_stackid_proto_pe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) case BPF_FUNC_get_stack:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) return &bpf_get_stack_proto_pe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) case BPF_FUNC_perf_prog_read_value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) return &bpf_perf_prog_read_value_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) case BPF_FUNC_read_branch_records:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) return &bpf_read_branch_records_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) return bpf_tracing_func_proto(func_id, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) * bpf_raw_tp_regs is kept separate from the bpf_pt_regs used by skb/xdp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) * paths to avoid a potential recursive reuse issue when/if tracepoints are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) * added inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) * Since raw tracepoints run regardless of bpf_prog_active, support concurrent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) * usage in normal, irq, and nmi contexts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) struct bpf_raw_tp_regs {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) struct pt_regs regs[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) static struct pt_regs *get_bpf_raw_tp_regs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) this_cpu_dec(bpf_raw_tp_nest_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) return ERR_PTR(-EBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) return &tp_regs->regs[nest_level - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) static void put_bpf_raw_tp_regs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) this_cpu_dec(bpf_raw_tp_nest_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) }
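/*
 * Illustrative nesting sketch (editorial addition, not part of the original
 * source): with the three regs slots above, a raw tracepoint hit in task
 * context uses regs[0]; if an irq fires another raw tracepoint before the
 * matching put_bpf_raw_tp_regs(), that one gets regs[1]; an nmi on top of
 * that gets regs[2]. A fourth nested level makes get_bpf_raw_tp_regs() warn
 * once and return -EBUSY, and the calling helper bails out with that error.
 */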
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) struct bpf_map *, map, u64, flags, void *, data, u64, size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) struct pt_regs *regs = get_bpf_raw_tp_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (IS_ERR(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) return PTR_ERR(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) perf_fetch_caller_regs(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) ret = ____bpf_perf_event_output(regs, map, flags, data, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) put_bpf_raw_tp_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) .func = bpf_perf_event_output_raw_tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) .gpl_only = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) .ret_type = RET_INTEGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) .arg1_type = ARG_PTR_TO_CTX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) .arg2_type = ARG_CONST_MAP_PTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) .arg3_type = ARG_ANYTHING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) .arg4_type = ARG_PTR_TO_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) .arg5_type = ARG_CONST_SIZE_OR_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) extern const struct bpf_func_proto bpf_skb_output_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) extern const struct bpf_func_proto bpf_xdp_output_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) struct bpf_map *, map, u64, flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) struct pt_regs *regs = get_bpf_raw_tp_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) if (IS_ERR(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) return PTR_ERR(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) perf_fetch_caller_regs(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) /* similar to bpf_get_stackid_tp, but pt_regs are fetched here instead of from ctx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) flags, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) put_bpf_raw_tp_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) .func = bpf_get_stackid_raw_tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) .gpl_only = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) .ret_type = RET_INTEGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) .arg1_type = ARG_PTR_TO_CTX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) .arg2_type = ARG_CONST_MAP_PTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) .arg3_type = ARG_ANYTHING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) void *, buf, u32, size, u64, flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) struct pt_regs *regs = get_bpf_raw_tp_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (IS_ERR(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) return PTR_ERR(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) perf_fetch_caller_regs(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) (unsigned long) size, flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) put_bpf_raw_tp_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) .func = bpf_get_stack_raw_tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) .gpl_only = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) .ret_type = RET_INTEGER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) .arg1_type = ARG_PTR_TO_CTX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) .arg2_type = ARG_PTR_TO_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) .arg3_type = ARG_CONST_SIZE_OR_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) .arg4_type = ARG_ANYTHING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) static const struct bpf_func_proto *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) switch (func_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) case BPF_FUNC_perf_event_output:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) return &bpf_perf_event_output_proto_raw_tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) case BPF_FUNC_get_stackid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) return &bpf_get_stackid_proto_raw_tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) case BPF_FUNC_get_stack:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) return &bpf_get_stack_proto_raw_tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) return bpf_tracing_func_proto(func_id, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) const struct bpf_func_proto *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) switch (func_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) #ifdef CONFIG_NET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) case BPF_FUNC_skb_output:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) return &bpf_skb_output_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) case BPF_FUNC_xdp_output:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) return &bpf_xdp_output_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) case BPF_FUNC_skc_to_tcp6_sock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) return &bpf_skc_to_tcp6_sock_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) case BPF_FUNC_skc_to_tcp_sock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) return &bpf_skc_to_tcp_sock_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) case BPF_FUNC_skc_to_tcp_timewait_sock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) return &bpf_skc_to_tcp_timewait_sock_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) case BPF_FUNC_skc_to_tcp_request_sock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return &bpf_skc_to_tcp_request_sock_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) case BPF_FUNC_skc_to_udp6_sock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) return &bpf_skc_to_udp6_sock_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) case BPF_FUNC_seq_printf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) return prog->expected_attach_type == BPF_TRACE_ITER ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) &bpf_seq_printf_proto :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) case BPF_FUNC_seq_write:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) return prog->expected_attach_type == BPF_TRACE_ITER ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) &bpf_seq_write_proto :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) case BPF_FUNC_seq_printf_btf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) return prog->expected_attach_type == BPF_TRACE_ITER ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) &bpf_seq_printf_btf_proto :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) case BPF_FUNC_d_path:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) return &bpf_d_path_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) return raw_tp_prog_func_proto(func_id, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) static bool raw_tp_prog_is_valid_access(int off, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) enum bpf_access_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) const struct bpf_prog *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) struct bpf_insn_access_aux *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (type != BPF_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) if (off % size != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) static bool tracing_prog_is_valid_access(int off, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) enum bpf_access_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) const struct bpf_prog *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) struct bpf_insn_access_aux *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) if (type != BPF_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if (off % size != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) return btf_ctx_access(off, size, type, prog, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) const union bpf_attr *kattr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) union bpf_attr __user *uattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) .get_func_proto = raw_tp_prog_func_proto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) .is_valid_access = raw_tp_prog_is_valid_access,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) const struct bpf_prog_ops raw_tracepoint_prog_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) #ifdef CONFIG_NET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) .test_run = bpf_prog_test_run_raw_tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) const struct bpf_verifier_ops tracing_verifier_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) .get_func_proto = tracing_prog_func_proto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) .is_valid_access = tracing_prog_is_valid_access,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) const struct bpf_prog_ops tracing_prog_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) .test_run = bpf_prog_test_run_tracing,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) static bool raw_tp_writable_prog_is_valid_access(int off, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) enum bpf_access_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) const struct bpf_prog *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) struct bpf_insn_access_aux *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (off == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) if (size != sizeof(u64) || type != BPF_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) info->reg_type = PTR_TO_TP_BUFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) return raw_tp_prog_is_valid_access(off, size, type, prog, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) .get_func_proto = raw_tp_prog_func_proto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) .is_valid_access = raw_tp_writable_prog_is_valid_access,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) const struct bpf_prog *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) struct bpf_insn_access_aux *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) const int size_u64 = sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) if (type != BPF_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) return false;
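/*
 * The only misaligned access let through is the 32-bit case below: with
 * sizeof(unsigned long) == 4, the u64 members of struct bpf_perf_event_data
 * can end up on a 4-byte (not 8-byte) boundary, so an 8-byte read at
 * off % 8 == 4 still has to be accepted.
 */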
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (off % size != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (sizeof(unsigned long) != 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) if (size != 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) if (off % size != 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) switch (off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) bpf_ctx_record_field_size(info, size_u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) case bpf_ctx_range(struct bpf_perf_event_data, addr):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) bpf_ctx_record_field_size(info, size_u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) if (size != sizeof(long))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) const struct bpf_insn *si,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) struct bpf_insn *insn_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) struct bpf_prog *prog, u32 *target_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) struct bpf_insn *insn = insn_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) switch (si->off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) case offsetof(struct bpf_perf_event_data, sample_period):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) data), si->dst_reg, si->src_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) offsetof(struct bpf_perf_event_data_kern, data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) bpf_target_off(struct perf_sample_data, period, 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) target_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) case offsetof(struct bpf_perf_event_data, addr):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) data), si->dst_reg, si->src_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) offsetof(struct bpf_perf_event_data_kern, data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) bpf_target_off(struct perf_sample_data, addr, 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) target_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) regs), si->dst_reg, si->src_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) offsetof(struct bpf_perf_event_data_kern, regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) si->off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) return insn - insn_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) }
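/*
 * A sketch of what the conversion above produces (illustrative, not compiler
 * output): a program's BPF_DW load of ctx->sample_period is rewritten into
 * two loads against struct bpf_perf_event_data_kern, roughly
 *
 *	dst = *(struct perf_sample_data **)(ctx + offsetof(struct bpf_perf_event_data_kern, data));
 *	dst = *(u64 *)(dst + offsetof(struct perf_sample_data, period));
 *
 * and any other offset is redirected through the regs pointer in the same
 * two-step way.
 */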
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) const struct bpf_verifier_ops perf_event_verifier_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) .get_func_proto = pe_prog_func_proto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) .is_valid_access = pe_prog_is_valid_access,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) .convert_ctx_access = pe_prog_convert_ctx_access,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) const struct bpf_prog_ops perf_event_prog_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) static DEFINE_MUTEX(bpf_event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) #define BPF_TRACE_MAX_PROGS 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) int perf_event_attach_bpf_prog(struct perf_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) struct bpf_prog_array *old_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) struct bpf_prog_array *new_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) int ret = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) * Kprobe override only works if the kprobe is placed on the function entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) * and only if the target function is on the error-injection opt-in list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) if (prog->kprobe_override &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) (!trace_kprobe_on_func_entry(event->tp_event) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) !trace_kprobe_error_injectable(event->tp_event)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) mutex_lock(&bpf_event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) if (event->prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) if (old_array &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) ret = -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) /* install the new array on event->tp_event and set event->prog */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) event->prog = prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) rcu_assign_pointer(event->tp_event->prog_array, new_array);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) bpf_prog_array_free(old_array);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) mutex_unlock(&bpf_event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
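/*
 * Userspace reaches the attach path above through the perf ioctl interface.
 * A rough sketch (not part of this file; perf_fd and prog_fd are assumed to
 * have been set up elsewhere):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/perf_event.h>
 *
 *	if (ioctl(perf_fd, PERF_EVENT_IOC_SET_BPF, prog_fd))
 *		perror("PERF_EVENT_IOC_SET_BPF");
 *	if (ioctl(perf_fd, PERF_EVENT_IOC_ENABLE, 0))
 *		perror("PERF_EVENT_IOC_ENABLE");
 */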
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) void perf_event_detach_bpf_prog(struct perf_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) struct bpf_prog_array *old_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) struct bpf_prog_array *new_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) mutex_lock(&bpf_event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) if (!event->prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) if (ret == -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) bpf_prog_array_delete_safe(old_array, event->prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) rcu_assign_pointer(event->tp_event->prog_array, new_array);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) bpf_prog_array_free(old_array);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) bpf_prog_put(event->prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) event->prog = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) mutex_unlock(&bpf_event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) int perf_event_query_prog_array(struct perf_event *event, void __user *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) struct perf_event_query_bpf __user *uquery = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) struct perf_event_query_bpf query = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) struct bpf_prog_array *progs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) u32 *ids, prog_cnt, ids_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) if (!perfmon_capable())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) if (event->attr.type != PERF_TYPE_TRACEPOINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) if (copy_from_user(&query, uquery, sizeof(query)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) ids_len = query.ids_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) if (ids_len > BPF_TRACE_MAX_PROGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if (!ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) * is required when the user only wants to check uquery->prog_cnt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) * There is no need to check for that case here since it is handled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) * gracefully in bpf_prog_array_copy_info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) mutex_lock(&bpf_event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) mutex_unlock(&bpf_event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) kfree(ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) }
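/*
 * The query above is driven from userspace via PERF_EVENT_IOC_QUERY_BPF.
 * A rough sketch (not part of this file; assumes perf_fd is a tracepoint
 * perf event with programs attached):
 *
 *	struct perf_event_query_bpf *q;
 *
 *	q = calloc(1, sizeof(*q) + 64 * sizeof(__u32));
 *	q->ids_len = 64;
 *	if (!ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, q))
 *		printf("%u program(s) attached, first id %u\n",
 *		       q->prog_cnt, q->prog_cnt ? q->ids[0] : 0);
 */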
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) extern struct bpf_raw_event_map __start__bpf_raw_tp[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) for (; btp < __stop__bpf_raw_tp; btp++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) if (!strcmp(btp->tp->name, name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) return btp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) return bpf_get_raw_tracepoint_module(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) struct module *mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) mod = __module_address((unsigned long)btp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) module_put(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) static __always_inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) cant_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) (void) BPF_PROG_RUN(prog, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) #define UNPACK(...) __VA_ARGS__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) #define REPEAT_1(FN, DL, X, ...) FN(X)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) #define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) #define SARG(X) u64 arg##X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) #define COPY(X) args[X] = arg##X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) #define __DL_COM (,)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) #define __DL_SEM (;)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) #define BPF_TRACE_DEFN_x(x) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) void bpf_trace_run##x(struct bpf_prog *prog, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) u64 args[x]; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) __bpf_trace_run(prog, args); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) EXPORT_SYMBOL_GPL(bpf_trace_run##x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) BPF_TRACE_DEFN_x(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) BPF_TRACE_DEFN_x(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) BPF_TRACE_DEFN_x(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) BPF_TRACE_DEFN_x(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) BPF_TRACE_DEFN_x(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) BPF_TRACE_DEFN_x(6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) BPF_TRACE_DEFN_x(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) BPF_TRACE_DEFN_x(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) BPF_TRACE_DEFN_x(9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) BPF_TRACE_DEFN_x(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) BPF_TRACE_DEFN_x(11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) BPF_TRACE_DEFN_x(12);
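/*
 * For reference, a hand expansion of one instance (illustrative, not
 * preprocessor output): BPF_TRACE_DEFN_x(2) becomes roughly
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */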
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) struct tracepoint *tp = btp->tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) * check that the program doesn't access arguments beyond what's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) * available for this tracepoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) if (prog->aux->max_tp_access > btp->writable_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) return __bpf_probe_register(btp, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) u32 *fd_type, const char **buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) u64 *probe_offset, u64 *probe_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) bool is_tracepoint, is_syscall_tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) int flags, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) prog = event->prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) if (!prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) *prog_id = prog->aux->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) flags = event->tp_event->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) is_syscall_tp = is_syscall_trace_event(event->tp_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) if (is_tracepoint || is_syscall_tp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) *buf = is_tracepoint ? event->tp_event->tp->name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) : event->tp_event->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) *fd_type = BPF_FD_TYPE_TRACEPOINT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) *probe_offset = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) *probe_addr = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) /* kprobe/uprobe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) #ifdef CONFIG_KPROBE_EVENTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) if (flags & TRACE_EVENT_FL_KPROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) err = bpf_get_kprobe_info(event, fd_type, buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) probe_offset, probe_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) event->attr.type == PERF_TYPE_TRACEPOINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) #ifdef CONFIG_UPROBE_EVENTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) if (flags & TRACE_EVENT_FL_UPROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) err = bpf_get_uprobe_info(event, fd_type, buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) probe_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) event->attr.type == PERF_TYPE_TRACEPOINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) static int __init send_signal_irq_work_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) struct send_signal_irq_work *work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) work = per_cpu_ptr(&send_signal_work, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) init_irq_work(&work->irq_work, do_bpf_send_signal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) subsys_initcall(send_signal_irq_work_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) #ifdef CONFIG_MODULES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) void *module)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) struct bpf_trace_module *btm, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) struct module *mod = module;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) if (mod->num_bpf_raw_events == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) mutex_lock(&bpf_module_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) switch (op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) case MODULE_STATE_COMING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) btm = kzalloc(sizeof(*btm), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) if (btm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) btm->module = module;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) list_add(&btm->list, &bpf_trace_modules);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) case MODULE_STATE_GOING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) if (btm->module == module) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) list_del(&btm->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) kfree(btm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) mutex_unlock(&bpf_module_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) return notifier_from_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) static struct notifier_block bpf_module_nb = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) .notifier_call = bpf_event_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) static int __init bpf_event_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) register_module_notifier(&bpf_module_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) fs_initcall(bpf_event_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) #endif /* CONFIG_MODULES */