^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * bpf-loader.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 2015 Huawei Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/bpf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <bpf/libbpf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <bpf/bpf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/zalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <stdlib.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include "debug.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include "evlist.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include "bpf-loader.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include "bpf-prologue.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include "probe-event.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include "probe-finder.h" // for MAX_PROBES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include "parse-events.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include "strfilter.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include "util.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include "llvm-utils.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include "c++/clang-c.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <internal/xyarray.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) const char *fmt, va_list args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) return veprintf(1, verbose, pr_fmt(fmt), args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) struct bpf_prog_priv {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) bool is_tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) char *sys_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) char *evt_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) struct perf_probe_event pev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) bool need_prologue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) struct bpf_insn *insns_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) int nr_types;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) int *type_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) static bool libbpf_initialized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) struct bpf_object *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) struct bpf_object *obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) if (!libbpf_initialized) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) libbpf_set_print(libbpf_perf_print);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) libbpf_initialized = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) if (IS_ERR_OR_NULL(obj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) pr_debug("bpf: failed to load buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) return obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) struct bpf_object *bpf__prepare_load(const char *filename, bool source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) struct bpf_object *obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) if (!libbpf_initialized) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) libbpf_set_print(libbpf_perf_print);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) libbpf_initialized = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) if (source) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) void *obj_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) size_t obj_buf_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) perf_clang__init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) perf_clang__cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) pr_debug("bpf: successful builtin compilation\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) llvm__dump_obj(filename, obj_buf, obj_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) free(obj_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) obj = bpf_object__open(filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) if (IS_ERR_OR_NULL(obj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) pr_debug("bpf: failed to load %s\n", filename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) return obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) return obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) void bpf__clear(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) struct bpf_object *obj, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) bpf_object__for_each_safe(obj, tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) bpf__unprobe(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) bpf_object__close(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) clear_prog_priv(struct bpf_program *prog __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) void *_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) struct bpf_prog_priv *priv = _priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) cleanup_perf_probe_events(&priv->pev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) zfree(&priv->insns_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) zfree(&priv->type_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) zfree(&priv->sys_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) zfree(&priv->evt_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) free(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) prog_config__exec(const char *value, struct perf_probe_event *pev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) pev->uprobes = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) pev->target = strdup(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) if (!pev->target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) prog_config__module(const char *value, struct perf_probe_event *pev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) pev->uprobes = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) pev->target = strdup(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) if (!pev->target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) prog_config__bool(const char *value, bool *pbool, bool invert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) bool bool_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) if (!pbool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) err = strtobool(value, &bool_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) *pbool = invert ? !bool_value : bool_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) prog_config__inlines(const char *value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) struct perf_probe_event *pev __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) return prog_config__bool(value, &probe_conf.no_inlines, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) prog_config__force(const char *value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) struct perf_probe_event *pev __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) return prog_config__bool(value, &probe_conf.force_add, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) static struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) const char *key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) const char *usage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) const char *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) int (*func)(const char *, struct perf_probe_event *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) } bpf_prog_config_terms[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) .key = "exec",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) .usage = "exec=<full path of file>",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) .desc = "Set uprobe target",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) .func = prog_config__exec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) .key = "module",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) .usage = "module=<module name> ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) .desc = "Set kprobe module",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) .func = prog_config__module,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) .key = "inlines",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) .usage = "inlines=[yes|no] ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) .desc = "Probe at inline symbol",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) .func = prog_config__inlines,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) .key = "force",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) .usage = "force=[yes|no] ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) .desc = "Forcibly add events with existing name",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) .func = prog_config__force,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) do_prog_config(const char *key, const char *value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) struct perf_probe_event *pev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) pr_debug("config bpf program: %s=%s\n", key, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) return bpf_prog_config_terms[i].func(value, pev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) key, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) pr_debug("\nHint: Valid options are:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) bpf_prog_config_terms[i].desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) pr_debug("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) return -BPF_LOADER_ERRNO__PROGCONF_TERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) static const char *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) char *text = strdup(config_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) char *sep, *line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) const char *main_str = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) if (!text) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) pr_debug("Not enough memory: dup config_str failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) line = text;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) while ((sep = strchr(line, ';'))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) char *equ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) *sep = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) equ = strchr(line, '=');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) if (!equ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) pr_warning("WARNING: invalid config in BPF object: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) pr_warning("\tShould be 'key=value'.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) goto nextline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) *equ = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) err = do_prog_config(line, equ + 1, pev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) nextline:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) line = sep + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) main_str = config_str + (line - text);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) free(text);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) return err ? ERR_PTR(err) : main_str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) parse_prog_config(const char *config_str, const char **p_main_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) bool *is_tp, struct perf_probe_event *pev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) const char *main_str = parse_prog_config_kvpair(config_str, pev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) if (IS_ERR(main_str))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) return PTR_ERR(main_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) *p_main_str = main_str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) if (!strchr(main_str, '=')) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) /* Is a tracepoint event? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) const char *s = strchr(main_str, ':');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) if (!s) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) pr_debug("bpf: '%s' is not a valid tracepoint\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) config_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) return -BPF_LOADER_ERRNO__CONFIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) *is_tp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) *is_tp = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) err = parse_perf_probe_command(main_str, pev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) pr_debug("bpf: '%s' is not a valid config string\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) config_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) /* parse failed, don't need clear pev. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) return -BPF_LOADER_ERRNO__CONFIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) config_bpf_program(struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) struct perf_probe_event *pev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) struct bpf_prog_priv *priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) const char *config_str, *main_str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) bool is_tp = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) /* Initialize per-program probing setting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) probe_conf.no_inlines = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) probe_conf.force_add = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) priv = calloc(sizeof(*priv), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) if (!priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) pr_debug("bpf: failed to alloc priv\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) pev = &priv->pev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) config_str = bpf_program__section_name(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) pr_debug("bpf: config program '%s'\n", config_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) err = parse_prog_config(config_str, &main_str, &is_tp, pev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) if (is_tp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) char *s = strchr(main_str, ':');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) priv->is_tp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) priv->sys_name = strndup(main_str, s - main_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) priv->evt_name = strdup(s + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) goto set_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) config_str, PERF_BPF_PROBE_GROUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) err = -BPF_LOADER_ERRNO__GROUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) } else if (!pev->group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) pev->group = strdup(PERF_BPF_PROBE_GROUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) if (!pev->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) pr_debug("bpf: strdup failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) if (!pev->event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) config_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) err = -BPF_LOADER_ERRNO__EVENTNAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) pr_debug("bpf: config '%s' is ok\n", config_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) set_priv:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) err = bpf_program__set_priv(prog, priv, clear_prog_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) pr_debug("Failed to set priv for program '%s'\n", config_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) if (pev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) clear_perf_probe_event(pev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) free(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) static int bpf__prepare_probe(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) static int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) static bool initialized = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) * Make err static, so if init failed the first, bpf__prepare_probe()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) * fails each time without calling init_probe_symbol_maps multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) * times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) if (initialized)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) initialized = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) err = init_probe_symbol_maps(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) pr_debug("Failed to init_probe_symbol_maps\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) probe_conf.max_probes = MAX_PROBES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) preproc_gen_prologue(struct bpf_program *prog, int n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) struct bpf_insn *orig_insns, int orig_insns_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) struct bpf_prog_prep_result *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) struct bpf_prog_priv *priv = bpf_program__priv(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) struct probe_trace_event *tev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) struct perf_probe_event *pev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) struct bpf_insn *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) size_t prologue_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) if (IS_ERR(priv) || !priv || priv->is_tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) pev = &priv->pev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) if (n < 0 || n >= priv->nr_types)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) /* Find a tev belongs to that type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) for (i = 0; i < pev->ntevs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) if (priv->type_mapping[i] == n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) if (i >= pev->ntevs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) pr_debug("Internal error: prologue type %d not found\n", n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) return -BPF_LOADER_ERRNO__PROLOGUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) tev = &pev->tevs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) buf = priv->insns_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) err = bpf__gen_prologue(tev->args, tev->nargs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) buf, &prologue_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) BPF_MAXINSNS - orig_insns_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) const char *title;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) title = bpf_program__section_name(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) pr_debug("Failed to generate prologue for program %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) title);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) memcpy(&buf[prologue_cnt], orig_insns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) sizeof(struct bpf_insn) * orig_insns_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) res->new_insn_ptr = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) res->pfd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) pr_debug("Internal error in preproc_gen_prologue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) return -BPF_LOADER_ERRNO__PROLOGUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) * compare_tev_args is reflexive, transitive and antisymmetric.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) * I can proof it but this margin is too narrow to contain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) static int compare_tev_args(const void *ptev1, const void *ptev2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) const struct probe_trace_event *tev1 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) *(const struct probe_trace_event **)ptev1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) const struct probe_trace_event *tev2 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) *(const struct probe_trace_event **)ptev2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) ret = tev2->nargs - tev1->nargs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) for (i = 0; i < tev1->nargs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) struct probe_trace_arg *arg1, *arg2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) struct probe_trace_arg_ref *ref1, *ref2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) arg1 = &tev1->args[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) arg2 = &tev2->args[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) ret = strcmp(arg1->value, arg2->value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) ref1 = arg1->ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) ref2 = arg2->ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) while (ref1 && ref2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) ret = ref2->offset - ref1->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) ref1 = ref1->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) ref2 = ref2->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) if (ref1 || ref2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) return ref2 ? 1 : -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) * Assign a type number to each tevs in a pev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) * mapping is an array with same slots as tevs in that pev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) * nr_types will be set to number of types.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) static int map_prologue(struct perf_probe_event *pev, int *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) int *nr_types)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) int i, type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) struct probe_trace_event **ptevs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) size_t array_sz = sizeof(*ptevs) * pev->ntevs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) ptevs = malloc(array_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) if (!ptevs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) pr_debug("Not enough memory: alloc ptevs failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) for (i = 0; i < pev->ntevs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) ptevs[i] = &pev->tevs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) qsort(ptevs, pev->ntevs, sizeof(*ptevs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) compare_tev_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) for (i = 0; i < pev->ntevs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) n = ptevs[i] - pev->tevs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) mapping[n] = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) pr_debug("mapping[%d]=%d\n", n, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) mapping[n] = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) mapping[n] = ++type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) pr_debug("mapping[%d]=%d\n", n, mapping[n]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) free(ptevs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) *nr_types = type + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) static int hook_load_preprocessor(struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) struct bpf_prog_priv *priv = bpf_program__priv(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) struct perf_probe_event *pev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) bool need_prologue = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) if (IS_ERR(priv) || !priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) pr_debug("Internal error when hook preprocessor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) return -BPF_LOADER_ERRNO__INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) if (priv->is_tp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) priv->need_prologue = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) pev = &priv->pev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) for (i = 0; i < pev->ntevs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) struct probe_trace_event *tev = &pev->tevs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) if (tev->nargs > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) need_prologue = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) * Since all tevs don't have argument, we don't need generate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) * prologue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) if (!need_prologue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) priv->need_prologue = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) priv->need_prologue = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) if (!priv->insns_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) pr_debug("Not enough memory: alloc insns_buf failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) if (!priv->type_mapping) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) pr_debug("Not enough memory: alloc type_mapping failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) memset(priv->type_mapping, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) sizeof(int) * pev->ntevs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) err = bpf_program__set_prep(prog, priv->nr_types,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) preproc_gen_prologue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) int bpf__probe(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) struct bpf_program *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) struct bpf_prog_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) struct perf_probe_event *pev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) err = bpf__prepare_probe();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) pr_debug("bpf__prepare_probe failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) bpf_object__for_each_program(prog, obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) err = config_bpf_program(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) priv = bpf_program__priv(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) if (IS_ERR(priv) || !priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) err = PTR_ERR(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) if (priv->is_tp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) bpf_program__set_tracepoint(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) bpf_program__set_kprobe(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) pev = &priv->pev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) err = convert_perf_probe_events(pev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) pr_debug("bpf_probe: failed to convert perf probe events\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) err = apply_perf_probe_events(pev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) pr_debug("bpf_probe: failed to apply perf probe events\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) * After probing, let's consider prologue, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) * adds program fetcher to BPF programs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) * hook_load_preprocessorr() hooks pre-processor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) * to bpf_program, let it generate prologue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) * dynamically during loading.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) err = hook_load_preprocessor(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) return err < 0 ? err : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) #define EVENTS_WRITE_BUFSIZE 4096
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) int bpf__unprobe(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) int err, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) struct bpf_program *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) bpf_object__for_each_program(prog, obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) struct bpf_prog_priv *priv = bpf_program__priv(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) if (IS_ERR(priv) || !priv || priv->is_tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) for (i = 0; i < priv->pev.ntevs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) struct probe_trace_event *tev = &priv->pev.tevs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) char name_buf[EVENTS_WRITE_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) struct strfilter *delfilter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) "%s:%s", tev->group, tev->event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) delfilter = strfilter__new(name_buf, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) if (!delfilter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) pr_debug("Failed to create filter for unprobing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) err = del_perf_probe_events(delfilter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) strfilter__delete(delfilter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) pr_debug("Failed to delete %s\n", name_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) ret = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) int bpf__load(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) err = bpf_object__load(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) char bf[128];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) libbpf_strerror(err, bf, sizeof(bf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) int bpf__foreach_event(struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) bpf_prog_iter_callback_t func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) struct bpf_program *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) bpf_object__for_each_program(prog, obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) struct bpf_prog_priv *priv = bpf_program__priv(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) struct probe_trace_event *tev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) struct perf_probe_event *pev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) int i, fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (IS_ERR(priv) || !priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) pr_debug("bpf: failed to get private field\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) return -BPF_LOADER_ERRNO__INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) if (priv->is_tp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) fd = bpf_program__fd(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) err = (*func)(priv->sys_name, priv->evt_name, fd, obj, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) pr_debug("bpf: tracepoint call back failed, stop iterate\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) pev = &priv->pev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) for (i = 0; i < pev->ntevs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) tev = &pev->tevs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (priv->need_prologue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) int type = priv->type_mapping[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) fd = bpf_program__nth_fd(prog, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) fd = bpf_program__fd(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) pr_debug("bpf: failed to get file descriptor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) err = (*func)(tev->group, tev->event, fd, obj, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) pr_debug("bpf: call back failed, stop iterate\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) enum bpf_map_op_type {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) BPF_MAP_OP_SET_VALUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) BPF_MAP_OP_SET_EVSEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) enum bpf_map_key_type {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) BPF_MAP_KEY_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) BPF_MAP_KEY_RANGES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) struct bpf_map_op {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) enum bpf_map_op_type op_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) enum bpf_map_key_type key_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct parse_events_array array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) } k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) } v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) struct bpf_map_priv {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) struct list_head ops_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) bpf_map_op__delete(struct bpf_map_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (!list_empty(&op->list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) list_del_init(&op->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (op->key_type == BPF_MAP_KEY_RANGES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) parse_events__clear_array(&op->k.array);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) free(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) bpf_map_priv__purge(struct bpf_map_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct bpf_map_op *pos, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) list_del_init(&pos->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) bpf_map_op__delete(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) void *_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) struct bpf_map_priv *priv = _priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) bpf_map_priv__purge(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) free(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) op->key_type = BPF_MAP_KEY_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (!term)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (term->array.nr_ranges) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) size_t memsz = term->array.nr_ranges *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) sizeof(op->k.array.ranges[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) op->k.array.ranges = memdup(term->array.ranges, memsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (!op->k.array.ranges) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) pr_debug("Not enough memory to alloc indices for map\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) op->key_type = BPF_MAP_KEY_RANGES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) op->k.array.nr_ranges = term->array.nr_ranges;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) static struct bpf_map_op *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) bpf_map_op__new(struct parse_events_term *term)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct bpf_map_op *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) op = zalloc(sizeof(*op));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (!op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) pr_debug("Failed to alloc bpf_map_op\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) INIT_LIST_HEAD(&op->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) err = bpf_map_op_setkey(op, term);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) free(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) static struct bpf_map_op *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) bpf_map_op__clone(struct bpf_map_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct bpf_map_op *newop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) newop = memdup(op, sizeof(*op));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (!newop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) pr_debug("Failed to alloc bpf_map_op\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) INIT_LIST_HEAD(&newop->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (op->key_type == BPF_MAP_KEY_RANGES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) size_t memsz = op->k.array.nr_ranges *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) sizeof(op->k.array.ranges[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (!newop->k.array.ranges) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) pr_debug("Failed to alloc indices for map\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) free(newop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) return newop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) static struct bpf_map_priv *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) bpf_map_priv__clone(struct bpf_map_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) struct bpf_map_priv *newpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) struct bpf_map_op *pos, *newop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) newpriv = zalloc(sizeof(*newpriv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (!newpriv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) pr_debug("Not enough memory to alloc map private\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) INIT_LIST_HEAD(&newpriv->ops_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) list_for_each_entry(pos, &priv->ops_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) newop = bpf_map_op__clone(pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (!newop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) bpf_map_priv__purge(newpriv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) list_add_tail(&newop->list, &newpriv->ops_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return newpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) const char *map_name = bpf_map__name(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) struct bpf_map_priv *priv = bpf_map__priv(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (IS_ERR(priv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) pr_debug("Failed to get private from map %s\n", map_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return PTR_ERR(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (!priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) priv = zalloc(sizeof(*priv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (!priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) pr_debug("Not enough memory to alloc map private\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) INIT_LIST_HEAD(&priv->ops_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) free(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return -BPF_LOADER_ERRNO__INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) list_add_tail(&op->list, &priv->ops_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
/*
 * Allocate a new op from @term and attach it to @map.
 *
 * Return: the new op on success, an ERR_PTR() encoded error otherwise.
 */
static struct bpf_map_op *
bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
{
	struct bpf_map_op *new_op = bpf_map_op__new(term);
	int ret;

	if (IS_ERR(new_op))
		return new_op;

	ret = bpf_map__add_op(map, new_op);
	if (!ret)
		return new_op;

	/* Attaching failed, so the map does not own the op: drop it. */
	bpf_map_op__delete(new_op);
	return ERR_PTR(ret);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) __bpf_map__config_value(struct bpf_map *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) struct parse_events_term *term)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) struct bpf_map_op *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) const char *map_name = bpf_map__name(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) const struct bpf_map_def *def = bpf_map__def(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (IS_ERR(def)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) pr_debug("Unable to get map definition from '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) map_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return -BPF_LOADER_ERRNO__INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (def->type != BPF_MAP_TYPE_ARRAY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) map_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (def->key_size < sizeof(unsigned int)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) pr_debug("Map %s has incorrect key size\n", map_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) switch (def->value_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) pr_debug("Map %s has incorrect value size\n", map_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) op = bpf_map__add_newop(map, term);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (IS_ERR(op))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) return PTR_ERR(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) op->op_type = BPF_MAP_OP_SET_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) op->v.value = term->val.num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) bpf_map__config_value(struct bpf_map *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) struct parse_events_term *term,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) struct evlist *evlist __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (!term->err_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) pr_debug("Config value not set\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return -BPF_LOADER_ERRNO__OBJCONF_CONF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) pr_debug("ERROR: wrong value type for 'value'\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) return __bpf_map__config_value(map, term);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) __bpf_map__config_event(struct bpf_map *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) struct parse_events_term *term,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) struct evlist *evlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) struct evsel *evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) const struct bpf_map_def *def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) struct bpf_map_op *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) const char *map_name = bpf_map__name(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) evsel = perf_evlist__find_evsel_by_str(evlist, term->val.str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (!evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) pr_debug("Event (for '%s') '%s' doesn't exist\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) map_name, term->val.str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) def = bpf_map__def(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (IS_ERR(def)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) pr_debug("Unable to get map definition from '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) map_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) return PTR_ERR(def);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * No need to check key_size and value_size:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * kernel has already checked them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) map_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) op = bpf_map__add_newop(map, term);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (IS_ERR(op))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) return PTR_ERR(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) op->op_type = BPF_MAP_OP_SET_EVSEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) op->v.evsel = evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) bpf_map__config_event(struct bpf_map *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) struct parse_events_term *term,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) struct evlist *evlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (!term->err_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) pr_debug("Config value not set\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return -BPF_LOADER_ERRNO__OBJCONF_CONF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) pr_debug("ERROR: wrong value type for 'event'\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return __bpf_map__config_event(map, term, evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) struct bpf_obj_config__map_func {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) const char *config_opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) int (*config_func)(struct bpf_map *, struct parse_events_term *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) struct evlist *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) {"value", bpf_map__config_value},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) {"event", bpf_map__config_event},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) config_map_indices_range_check(struct parse_events_term *term,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) struct bpf_map *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) const char *map_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct parse_events_array *array = &term->array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) const struct bpf_map_def *def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (!array->nr_ranges)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (!array->ranges) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) map_name, (int)array->nr_ranges);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return -BPF_LOADER_ERRNO__INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) def = bpf_map__def(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (IS_ERR(def)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) pr_debug("ERROR: Unable to get map definition from '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) map_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) return -BPF_LOADER_ERRNO__INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) for (i = 0; i < array->nr_ranges; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) unsigned int start = array->ranges[i].start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) size_t length = array->ranges[i].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) unsigned int idx = start + length - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (idx >= def->max_entries) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) pr_debug("ERROR: index %d too large\n", idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
/*
 * Parse one "map:<mapname>.<option>" config term and apply it to the
 * named map inside @obj.
 *
 * @obj:          BPF object holding the target map.
 * @term:         parsed config term; term->config holds the key string.
 * @evlist:       event list, forwarded to the per-option config function.
 * @key_scan_pos: in/out cursor into term->config used for error
 *                reporting; on success it is advanced past the option
 *                name.
 *
 * Return: 0 on success, -ENOMEM or a negative BPF_LOADER_ERRNO__* code.
 */
static int
bpf__obj_config_map(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct evlist *evlist,
		    int *key_scan_pos)
{
	/* key is "map:<mapname>.<config opt>" */
	char *map_name = strdup(term->config + sizeof("map:") - 1);
	struct bpf_map *map;
	int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
	char *map_opt;
	size_t i;

	if (!map_name)
		return -ENOMEM;

	map_opt = strchr(map_name, '.');
	if (!map_opt) {
		pr_debug("ERROR: Invalid map config: %s\n", map_name);
		goto out;
	}

	/* Split "<mapname>.<opt>" in place: terminate the name, step to opt. */
	*map_opt++ = '\0';
	if (*map_opt == '\0') {
		pr_debug("ERROR: Invalid map option: %s\n", term->config);
		goto out;
	}

	map = bpf_object__find_map_by_name(obj, map_name);
	if (!map) {
		pr_debug("ERROR: Map %s doesn't exist\n", map_name);
		err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
		goto out;
	}

	/*
	 * Temporarily advance the scan position past the option name so a
	 * range-check failure points at the indices; restore it on success.
	 */
	*key_scan_pos += strlen(map_opt);
	err = config_map_indices_range_check(term, map, map_name);
	if (err)
		goto out;
	*key_scan_pos -= strlen(map_opt);

	/* Dispatch to the handler registered for this option name. */
	for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
		struct bpf_obj_config__map_func *func =
			&bpf_obj_config__map_funcs[i];

		if (strcmp(map_opt, func->config_opt) == 0) {
			err = func->config_func(map, term, evlist);
			goto out;
		}
	}

	pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
	err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
out:
	/* On success, account for the option name in the scan position. */
	if (!err)
		*key_scan_pos += strlen(map_opt);

	free(map_name);
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) int bpf__config_obj(struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) struct parse_events_term *term,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) struct evlist *evlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) int *error_pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) int key_scan_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (!obj || !term || !term->config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (strstarts(term->config, "map:")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) key_scan_pos = sizeof("map:") - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (error_pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) *error_pos = key_scan_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
/*
 * Callback applied per selected map key.  @pkey points to an unsigned
 * int array index; @arg is caller-supplied context.  Returns 0 on
 * success, non-zero to abort the iteration.
 */
typedef int (*map_config_func_t)(const char *name, int map_fd,
				 const struct bpf_map_def *pdef,
				 struct bpf_map_op *op,
				 void *pkey, void *arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) foreach_key_array_all(map_config_func_t func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) void *arg, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) int map_fd, const struct bpf_map_def *pdef,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) struct bpf_map_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) for (i = 0; i < pdef->max_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) err = func(name, map_fd, pdef, op, &i, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) pr_debug("ERROR: failed to insert value to %s[%u]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) name, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) foreach_key_array_ranges(map_config_func_t func, void *arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) const char *name, int map_fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) const struct bpf_map_def *pdef,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) struct bpf_map_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) unsigned int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) for (i = 0; i < op->k.array.nr_ranges; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) unsigned int start = op->k.array.ranges[i].start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) size_t length = op->k.array.ranges[i].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) for (j = 0; j < length; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) unsigned int idx = start + j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) err = func(name, map_fd, pdef, op, &idx, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) pr_debug("ERROR: failed to insert value to %s[%u]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) name, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) bpf_map_config_foreach_key(struct bpf_map *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) map_config_func_t func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) int err, map_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) struct bpf_map_op *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) const struct bpf_map_def *def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) const char *name = bpf_map__name(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) struct bpf_map_priv *priv = bpf_map__priv(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (IS_ERR(priv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) pr_debug("ERROR: failed to get private from map %s\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) return -BPF_LOADER_ERRNO__INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if (!priv || list_empty(&priv->ops_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) pr_debug("INFO: nothing to config for map %s\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) def = bpf_map__def(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (IS_ERR(def)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) pr_debug("ERROR: failed to get definition from map %s\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) return -BPF_LOADER_ERRNO__INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) map_fd = bpf_map__fd(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (map_fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) pr_debug("ERROR: failed to get fd from map %s\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) return map_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) list_for_each_entry(op, &priv->ops_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) switch (def->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) case BPF_MAP_TYPE_ARRAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) switch (op->key_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) case BPF_MAP_KEY_ALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) err = foreach_key_array_all(func, arg, name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) map_fd, def, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) case BPF_MAP_KEY_RANGES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) err = foreach_key_array_ranges(func, arg, name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) map_fd, def,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) pr_debug("ERROR: keytype for map '%s' invalid\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) return -BPF_LOADER_ERRNO__INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) pr_debug("ERROR: type of '%s' incorrect\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) apply_config_value_for_key(int map_fd, void *pkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) size_t val_size, u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) switch (val_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) case 1: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) u8 _val = (u8)(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) case 2: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) u16 _val = (u16)(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) case 4: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) u32 _val = (u32)(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) case 8: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) pr_debug("ERROR: invalid value size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) if (err && errno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) struct evsel *evsel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) struct xyarray *xy = evsel->core.fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) struct perf_event_attr *attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) unsigned int key, events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) bool check_pass = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) int *evt_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (!xy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) pr_debug("ERROR: evsel not ready for map %s\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) return -BPF_LOADER_ERRNO__INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (xy->row_size / xy->entry_size != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) attr = &evsel->core.attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (attr->inherit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) pr_debug("ERROR: Can't put inherit event into map %s\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (evsel__is_bpf_output(evsel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) check_pass = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (attr->type == PERF_TYPE_RAW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) check_pass = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) if (attr->type == PERF_TYPE_HARDWARE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) check_pass = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (!check_pass) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) pr_debug("ERROR: Event type is wrong for map %s\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) events = xy->entries / (xy->row_size / xy->entry_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) key = *((unsigned int *)pkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (key >= events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) pr_debug("ERROR: there is no event %d for map %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) key, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) evt_fd = xyarray__entry(xy, key, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) if (err && errno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) apply_obj_config_map_for_key(const char *name, int map_fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) const struct bpf_map_def *pdef,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) struct bpf_map_op *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) void *pkey, void *arg __maybe_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) switch (op->op_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) case BPF_MAP_OP_SET_VALUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) err = apply_config_value_for_key(map_fd, pkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) pdef->value_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) op->v.value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) case BPF_MAP_OP_SET_EVSEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) err = apply_config_evsel_for_key(name, map_fd, pkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) op->v.evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) pr_debug("ERROR: unknown value type for '%s'\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) err = -BPF_LOADER_ERRNO__INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) apply_obj_config_map(struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return bpf_map_config_foreach_key(map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) apply_obj_config_map_for_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) apply_obj_config_object(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) bpf_object__for_each_map(map, obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) err = apply_obj_config_map(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) int bpf__apply_obj_config(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) struct bpf_object *obj, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) bpf_object__for_each_safe(obj, tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) err = apply_obj_config_object(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) #define bpf__for_each_map(pos, obj, objtmp) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) bpf_object__for_each_safe(obj, objtmp) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) bpf_object__for_each_map(pos, obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) #define bpf__for_each_map_named(pos, obj, objtmp, name) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) bpf__for_each_map(pos, obj, objtmp) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (bpf_map__name(pos) && \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) (strcmp(name, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) bpf_map__name(pos)) == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) struct bpf_map_priv *tmpl_priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) struct bpf_object *obj, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct evsel *evsel = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) bool need_init = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) bpf__for_each_map_named(map, obj, tmp, name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) struct bpf_map_priv *priv = bpf_map__priv(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if (IS_ERR(priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) * No need to check map type: type should have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) * verified by kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) if (!need_init && !priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) need_init = !priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) if (!tmpl_priv && priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) tmpl_priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) if (!need_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if (!tmpl_priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) char *event_definition = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) err = parse_events(evlist, event_definition, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) free(event_definition);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) return ERR_PTR(-err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) evsel = evlist__last(evlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) bpf__for_each_map_named(map, obj, tmp, name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) struct bpf_map_priv *priv = bpf_map__priv(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) if (IS_ERR(priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) if (priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) if (tmpl_priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) priv = bpf_map_priv__clone(tmpl_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (!priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) bpf_map_priv__clear(map, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) } else if (evsel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) struct bpf_map_op *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) op = bpf_map__add_newop(map, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (IS_ERR(op))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) return ERR_CAST(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) op->op_type = BPF_MAP_OP_SET_EVSEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) op->v.evsel = evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) return evsel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) int bpf__setup_stdout(struct evlist *evlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) struct evsel *evsel = bpf__setup_output_event(evlist, "__bpf_stdout__");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) return PTR_ERR_OR_ZERO(evsel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) #define ERRNO_OFFSET(e) ((e) - __BPF_LOADER_ERRNO__START)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) #define ERRCODE_OFFSET(c) ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) #define NR_ERRNO (__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) static const char *bpf_loader_strerror_table[NR_ERRNO] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) [ERRCODE_OFFSET(CONFIG)] = "Invalid config string",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) [ERRCODE_OFFSET(GROUP)] = "Invalid group name",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) [ERRCODE_OFFSET(EVENTNAME)] = "No event name found in config string",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) [ERRCODE_OFFSET(INTERNAL)] = "BPF loader internal error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) [ERRCODE_OFFSET(COMPILE)] = "Error when compiling BPF scriptlet",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) [ERRCODE_OFFSET(PROGCONF_TERM)] = "Invalid program config term in config string",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) [ERRCODE_OFFSET(PROLOGUE)] = "Failed to generate prologue",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) [ERRCODE_OFFSET(PROLOGUE2BIG)] = "Prologue too big for program",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) [ERRCODE_OFFSET(PROLOGUEOOB)] = "Offset out of bound for prologue",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) [ERRCODE_OFFSET(OBJCONF_OPT)] = "Invalid object config option",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) [ERRCODE_OFFSET(OBJCONF_CONF)] = "Config value not set (missing '=')",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) [ERRCODE_OFFSET(OBJCONF_MAP_OPT)] = "Invalid object map config option",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) [ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)] = "Target map doesn't exist",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) [ERRCODE_OFFSET(OBJCONF_MAP_VALUE)] = "Incorrect value type for map",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) [ERRCODE_OFFSET(OBJCONF_MAP_TYPE)] = "Incorrect map type",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) [ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)] = "Incorrect map key size",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) [ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)] = "Incorrect map value size",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) [ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)] = "Event not found for map setting",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) [ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)] = "Invalid map size for event setting",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) [ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)] = "Event dimension too large",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) [ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)] = "Doesn't support inherit event",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) [ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)] = "Wrong event type for map",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) [ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)] = "Index too large",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) bpf_loader_strerror(int err, char *buf, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) char sbuf[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) const char *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) if (!buf || !size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) err = err > 0 ? err : -err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (err >= __LIBBPF_ERRNO__START)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) return libbpf_strerror(err, buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) snprintf(buf, size, "%s", msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) buf[size - 1] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) if (err >= __BPF_LOADER_ERRNO__END)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) snprintf(buf, size, "Unknown bpf loader error %d", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) snprintf(buf, size, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) str_error_r(err, sbuf, sizeof(sbuf)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) buf[size - 1] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) #define bpf__strerror_head(err, buf, size) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) char sbuf[STRERR_BUFSIZE], *emsg;\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) if (!size)\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) return 0;\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) if (err < 0)\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) err = -err;\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) emsg = sbuf;\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) switch (err) {\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) default:\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) scnprintf(buf, size, "%s", emsg);\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) #define bpf__strerror_entry(val, fmt...)\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) case val: {\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) scnprintf(buf, size, fmt);\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) break;\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) #define bpf__strerror_end(buf, size)\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) buf[size - 1] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) int bpf__strerror_prepare_load(const char *filename, bool source,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) int err, char *buf, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) size_t n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) n = snprintf(buf, size, "Failed to load %s%s: ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) filename, source ? " from source" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) if (n >= size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) buf[size - 1] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) buf += n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) size -= n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) ret = bpf_loader_strerror(err, buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) buf[size - 1] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) int err, char *buf, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) bpf__strerror_head(err, buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) case BPF_LOADER_ERRNO__PROGCONF_TERM: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) scnprintf(buf, size, "%s (add -v to see detail)", emsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) bpf__strerror_entry(EEXIST, "Probe point exist. Try 'perf probe -d \"*\"' and set 'force=yes'");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) bpf__strerror_entry(EACCES, "You need to be root");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) bpf__strerror_end(buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) int bpf__strerror_load(struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) int err, char *buf, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) bpf__strerror_head(err, buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) case LIBBPF_ERRNO__KVER: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) unsigned int obj_kver = bpf_object__kversion(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) unsigned int real_kver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) if (fetch_kernel_version(&real_kver, NULL, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) scnprintf(buf, size, "Unable to fetch kernel version");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) if (obj_kver != real_kver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) scnprintf(buf, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) KVER_PARAM(obj_kver),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) KVER_PARAM(real_kver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) scnprintf(buf, size, "Failed to load program for unknown reason");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) bpf__strerror_end(buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) struct parse_events_term *term __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) struct evlist *evlist __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) int *error_pos __maybe_unused, int err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) char *buf, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) bpf__strerror_head(err, buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) "Can't use this config term with this map type");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) bpf__strerror_end(buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) bpf__strerror_head(err, buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) "Cannot set event to BPF map in multi-thread tracing");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) "%s (Hint: use -i to turn off inherit)", emsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) "Can only put raw, hardware and BPF output event into a BPF map");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) bpf__strerror_end(buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) int bpf__strerror_setup_output_event(struct evlist *evlist __maybe_unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) int err, char *buf, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) bpf__strerror_head(err, buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) bpf__strerror_end(buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) }