// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/string.h>
#include <internal/lib.h>
#include <symbol/kallsyms.h>
#include "bpf-event.h"
#include "debug.h"
#include "dso.h"
#include "symbol.h"
#include "machine.h"
#include "env.h"
#include "session.h"
#include "map.h"
#include "evlist.h"
#include "record.h"
#include "util/synthetic-events.h"

#define ptr_to_u64(ptr)    ((__u64)(unsigned long)(ptr))

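/* Print "len" bytes of "data" as two-digit lowercase hex into "buf". */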
static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
{
	int ret = 0;
	size_t i;

	for (i = 0; i < len; i++)
		ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
	return ret;
}

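/*
 * Handle PERF_BPF_EVENT_PROG_LOAD: look up the bpf_prog_info saved in the
 * perf_env for this program id and mark the kernel maps covering its jited
 * ksym addresses as BPF program DSOs, so that later symbol resolution and
 * annotation can find the program info.
 */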
static int machine__process_bpf_event_load(struct machine *machine,
					   union perf_event *event,
					   struct perf_sample *sample __maybe_unused)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info_node *info_node;
	struct perf_env *env = machine->env;
	int id = event->bpf.id;
	unsigned int i;

	/* perf-record, no need to handle bpf-event */
	if (env == NULL)
		return 0;

	info_node = perf_env__find_bpf_prog_info(env, id);
	if (!info_node)
		return 0;
	info_linear = info_node->info_linear;

	for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
		u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
		u64 addr = addrs[i];
		struct map *map = maps__find(&machine->kmaps, addr);

		if (map) {
			map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
			map->dso->bpf_prog.id = id;
			map->dso->bpf_prog.sub_id = i;
			map->dso->bpf_prog.env = env;
		}
	}
	return 0;
}

int machine__process_bpf(struct machine *machine, union perf_event *event,
			 struct perf_sample *sample)
{
	if (dump_trace)
		perf_event__fprintf_bpf(event, stdout);

	switch (event->bpf.type) {
	case PERF_BPF_EVENT_PROG_LOAD:
		return machine__process_bpf_event_load(machine, event, sample);

	case PERF_BPF_EVENT_PROG_UNLOAD:
		/*
		 * Do not free bpf_prog_info and btf of the program here,
		 * as annotation still needs them. They will be freed at
		 * the end of the session.
		 */
		break;
	default:
		pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
		break;
	}
	return 0;
}

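/*
 * Copy the raw BTF data of "btf" into a freshly allocated btf_node and
 * insert it into the perf_env, keyed by "btf_id".
 */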
static int perf_env__fetch_btf(struct perf_env *env,
			       u32 btf_id,
			       struct btf *btf)
{
	struct btf_node *node;
	u32 data_size;
	const void *data;

	data = btf__get_raw_data(btf, &data_size);

	node = malloc(data_size + sizeof(struct btf_node));
	if (!node)
		return -1;

	node->id = btf_id;
	node->data_size = data_size;
	memcpy(node->data, data, data_size);

	if (!perf_env__insert_btf(env, node)) {
		/* Insertion failed because of a duplicate. */
		free(node);
		return -1;
	}
	return 0;
}

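/*
 * Build the ksym name of sub program "sub_id": "bpf_prog_<tag>", followed by
 * "_<func name>" when BTF func info is available, "_<prog name>" for a
 * single-function program without BTF, or "_F" otherwise.
 */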
static int synthesize_bpf_prog_name(char *buf, int size,
				    struct bpf_prog_info *info,
				    struct btf *btf,
				    u32 sub_id)
{
	u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
	void *func_infos = (void *)(uintptr_t)(info->func_info);
	u32 sub_prog_cnt = info->nr_jited_ksyms;
	const struct bpf_func_info *finfo;
	const char *short_name = NULL;
	const struct btf_type *t;
	int name_len;

	name_len = snprintf(buf, size, "bpf_prog_");
	name_len += snprintf_hex(buf + name_len, size - name_len,
				 prog_tags[sub_id], BPF_TAG_SIZE);
	if (btf) {
		finfo = func_infos + sub_id * info->func_info_rec_size;
		t = btf__type_by_id(btf, finfo->type_id);
		short_name = btf__name_by_offset(btf, t->name_off);
	} else if (sub_id == 0 && sub_prog_cnt == 1) {
		/* no subprog */
		if (info->name[0])
			short_name = info->name;
	} else
		short_name = "F";
	if (short_name)
		name_len += snprintf(buf + name_len, size - name_len,
				     "_%s", short_name);
	return name_len;
}

/*
 * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
 * program. One PERF_RECORD_BPF_EVENT is generated for the program. And
 * one PERF_RECORD_KSYMBOL is generated for each sub program.
 *
 * Returns:
 *    0 for success;
 *   -1 for failures;
 *   -2 for lack of kernel support.
 */
static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
					       perf_event__handler_t process,
					       struct machine *machine,
					       int fd,
					       union perf_event *event,
					       struct record_opts *opts)
{
	struct perf_record_ksymbol *ksymbol_event = &event->ksymbol;
	struct perf_record_bpf_event *bpf_event = &event->bpf;
	struct bpf_prog_info_linear *info_linear;
	struct perf_tool *tool = session->tool;
	struct bpf_prog_info_node *info_node;
	struct bpf_prog_info *info;
	struct btf *btf = NULL;
	struct perf_env *env;
	u32 sub_prog_cnt, i;
	int err = 0;
	u64 arrays;

	/*
	 * for perf-record and perf-report use header.env;
	 * otherwise, use global perf_env.
	 */
	env = session->data ? &session->header.env : &perf_env;

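	/*
	 * Select which variable length arrays of bpf_prog_info to fetch:
	 * jited ksym addresses and function lengths for the ksymbol events,
	 * func info and prog tags for symbol naming, jited instructions and
	 * (jited) line info for later annotation.
	 */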
	arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
	arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
	arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
	arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
	arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
	arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
	arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;

	info_linear = bpf_program__get_prog_info_linear(fd, arrays);
	if (IS_ERR_OR_NULL(info_linear)) {
		info_linear = NULL;
		pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
		return -1;
	}

	if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
		free(info_linear);
		pr_debug("%s: the kernel is too old, aborting\n", __func__);
		return -2;
	}

	info = &info_linear->info;

	/* number of ksyms, func_lengths, and tags should match */
	sub_prog_cnt = info->nr_jited_ksyms;
	if (sub_prog_cnt != info->nr_prog_tags ||
	    sub_prog_cnt != info->nr_jited_func_lens) {
		err = -1;
		goto out;
	}

	/* check BTF func info support */
	if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
		/* btf func info number should be same as sub_prog_cnt */
		if (sub_prog_cnt != info->nr_func_info) {
			pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
			err = -1;
			goto out;
		}
		if (btf__get_from_id(info->btf_id, &btf)) {
			pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
			err = -1;
			btf = NULL;
			goto out;
		}
		perf_env__fetch_btf(env, info->btf_id, btf);
	}

	/* Synthesize PERF_RECORD_KSYMBOL */
	for (i = 0; i < sub_prog_cnt; i++) {
		__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
		__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
		int name_len;

		*ksymbol_event = (struct perf_record_ksymbol) {
			.header = {
				.type = PERF_RECORD_KSYMBOL,
				.size = offsetof(struct perf_record_ksymbol, name),
			},
			.addr = prog_addrs[i],
			.len = prog_lens[i],
			.ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
			.flags = 0,
		};

		name_len = synthesize_bpf_prog_name(ksymbol_event->name,
						    KSYM_NAME_LEN, info, btf, i);
		ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
							 sizeof(u64));

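		/* zero-fill the sample_id area appended to the synthesized event */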
		memset((void *)event + event->header.size, 0, machine->id_hdr_size);
		event->header.size += machine->id_hdr_size;
		err = perf_tool__process_synth_event(tool, event,
						     machine, process);
	}

	if (!opts->no_bpf_event) {
		/* Synthesize PERF_RECORD_BPF_EVENT */
		*bpf_event = (struct perf_record_bpf_event) {
			.header = {
				.type = PERF_RECORD_BPF_EVENT,
				.size = sizeof(struct perf_record_bpf_event),
			},
			.type = PERF_BPF_EVENT_PROG_LOAD,
			.flags = 0,
			.id = info->id,
		};
		memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
		memset((void *)event + event->header.size, 0, machine->id_hdr_size);
		event->header.size += machine->id_hdr_size;

		/* save bpf_prog_info to env */
		info_node = malloc(sizeof(struct bpf_prog_info_node));
		if (!info_node) {
			err = -1;
			goto out;
		}

		info_node->info_linear = info_linear;
		perf_env__insert_bpf_prog_info(env, info_node);
		info_linear = NULL;

		/*
		 * process after saving bpf_prog_info to env, so that
		 * required information is ready for look up
		 */
		err = perf_tool__process_synth_event(tool, event,
						     machine, process);
	}

out:
	free(info_linear);
	btf__free(btf);
	return err ? -1 : 0;
}

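/* Context handed to kallsyms__parse() while synthesizing BPF image ksymbols. */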
struct kallsyms_parse {
	union perf_event *event;
	perf_event__handler_t process;
	struct machine *machine;
	struct perf_tool *tool;
};

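/*
 * Synthesize a PERF_RECORD_KSYMBOL for a BPF image (trampoline or dispatcher)
 * found in kallsyms. The real image size is not known here, so one page is
 * reported as its length.
 */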
static int
process_bpf_image(char *name, u64 addr, struct kallsyms_parse *data)
{
	struct machine *machine = data->machine;
	union perf_event *event = data->event;
	struct perf_record_ksymbol *ksymbol;
	int len;

	ksymbol = &event->ksymbol;

	*ksymbol = (struct perf_record_ksymbol) {
		.header = {
			.type = PERF_RECORD_KSYMBOL,
			.size = offsetof(struct perf_record_ksymbol, name),
		},
		.addr = addr,
		.len = page_size,
		.ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
		.flags = 0,
	};

	len = scnprintf(ksymbol->name, KSYM_NAME_LEN, "%s", name);
	ksymbol->header.size += PERF_ALIGN(len + 1, sizeof(u64));
	memset((void *) event + event->header.size, 0, machine->id_hdr_size);
	event->header.size += machine->id_hdr_size;

	return perf_tool__process_synth_event(data->tool, event, machine,
					      data->process);
}

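/*
 * kallsyms__parse() callback: symbols that belong to the "[bpf]" module and
 * are named bpf_trampoline_<id> or bpf_dispatcher_<name> are turned into
 * ksymbol events via process_bpf_image().
 */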
static int
kallsyms_process_symbol(void *data, const char *_name,
			char type __maybe_unused, u64 start)
{
	char disp[KSYM_NAME_LEN];
	char *module, *name;
	unsigned long id;
	int err = 0;

	module = strchr(_name, '\t');
	if (!module)
		return 0;

	/* We are going after [bpf] module ... */
	if (strcmp(module + 1, "[bpf]"))
		return 0;

	name = memdup(_name, (module - _name) + 1);
	if (!name)
		return -ENOMEM;

	name[module - _name] = 0;

	/* .. and only for trampolines and dispatchers */
	if ((sscanf(name, "bpf_trampoline_%lu", &id) == 1) ||
	    (sscanf(name, "bpf_dispatcher_%s", disp) == 1))
		err = process_bpf_image(name, start, data);

	free(name);
	return err;
}

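/*
 * Synthesize events for all BPF programs currently loaded (walking program
 * ids via bpf_prog_get_next_id()), then one PERF_RECORD_KSYMBOL per BPF
 * trampoline/dispatcher image found in kallsyms.
 */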
int perf_event__synthesize_bpf_events(struct perf_session *session,
				      perf_event__handler_t process,
				      struct machine *machine,
				      struct record_opts *opts)
{
	const char *kallsyms_filename = "/proc/kallsyms";
	struct kallsyms_parse arg;
	union perf_event *event;
	__u32 id = 0;
	int err;
	int fd;

	event = malloc(sizeof(event->bpf) + KSYM_NAME_LEN + machine->id_hdr_size);
	if (!event)
		return -1;

	/* Synthesize all the bpf programs in the system. */
	while (true) {
		err = bpf_prog_get_next_id(id, &id);
		if (err) {
			if (errno == ENOENT) {
				err = 0;
				break;
			}
			pr_debug("%s: can't get next program: %s%s\n",
				 __func__, strerror(errno),
				 errno == EINVAL ? " -- kernel too old?" : "");
			/* don't report error on old kernel or EPERM */
			err = (errno == EINVAL || errno == EPERM) ? 0 : -1;
			break;
		}
		fd = bpf_prog_get_fd_by_id(id);
		if (fd < 0) {
			pr_debug("%s: failed to get fd for prog_id %u\n",
				 __func__, id);
			continue;
		}

		err = perf_event__synthesize_one_bpf_prog(session, process,
							  machine, fd,
							  event, opts);
		close(fd);
		if (err) {
			/* do not return error for old kernel */
			if (err == -2)
				err = 0;
			break;
		}
	}

	/* Synthesize all the bpf images - trampolines/dispatchers. */
	if (symbol_conf.kallsyms_name != NULL)
		kallsyms_filename = symbol_conf.kallsyms_name;

	arg = (struct kallsyms_parse) {
		.event   = event,
		.process = process,
		.machine = machine,
		.tool    = session->tool,
	};

	if (kallsyms__parse(kallsyms_filename, &arg, kallsyms_process_symbol)) {
		pr_err("%s: failed to synthesize bpf images: %s\n",
		       __func__, strerror(errno));
	}

	free(event);
	return err;
}

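/*
 * Fetch bpf_prog_info and BTF for a newly loaded program and stash both in
 * the perf_env. Called from the side-band callback on PERF_BPF_EVENT_PROG_LOAD.
 */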
static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info_node *info_node;
	struct btf *btf = NULL;
	u64 arrays;
	u32 btf_id;
	int fd;

	fd = bpf_prog_get_fd_by_id(id);
	if (fd < 0)
		return;

	arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
	arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
	arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
	arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
	arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
	arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
	arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;

	info_linear = bpf_program__get_prog_info_linear(fd, arrays);
	if (IS_ERR_OR_NULL(info_linear)) {
		pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
		goto out;
	}

	btf_id = info_linear->info.btf_id;

	info_node = malloc(sizeof(struct bpf_prog_info_node));
	if (info_node) {
		info_node->info_linear = info_linear;
		perf_env__insert_bpf_prog_info(env, info_node);
	} else
		free(info_linear);

	if (btf_id == 0)
		goto out;

	if (btf__get_from_id(btf_id, &btf)) {
		pr_debug("%s: failed to get BTF of id %u, aborting\n",
			 __func__, btf_id);
		goto out;
	}
	perf_env__fetch_btf(env, btf_id, btf);

out:
	btf__free(btf);
	close(fd);
}

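/* Side-band event callback: record info about BPF programs as they are loaded. */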
static int bpf_event__sb_cb(union perf_event *event, void *data)
{
	struct perf_env *env = data;

	if (event->header.type != PERF_RECORD_BPF_EVENT)
		return -1;

	switch (event->bpf.type) {
	case PERF_BPF_EVENT_PROG_LOAD:
		perf_env__add_bpf_info(env, event->bpf.id);

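		/* fall through */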
	case PERF_BPF_EVENT_PROG_UNLOAD:
		/*
		 * Do not free bpf_prog_info and btf of the program here,
		 * as annotation still needs them. They will be freed at
		 * the end of the session.
		 */
		break;
	default:
		pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
		break;
	}

	return 0;
}

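/*
 * Open a software dummy event with attr.bpf_event set on the side-band
 * evlist, so that PERF_RECORD_BPF_EVENT records are delivered to
 * bpf_event__sb_cb() while recording.
 */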
int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
{
	struct perf_event_attr attr = {
		.type             = PERF_TYPE_SOFTWARE,
		.config           = PERF_COUNT_SW_DUMMY,
		.sample_id_all    = 1,
		.watermark        = 1,
		.bpf_event        = 1,
		.size             = sizeof(attr), /* to capture ABI version */
	};

	/*
	 * Older gcc versions don't support designated initializers, like above,
	 * for unnamed union members, such as the following:
	 */
	attr.wakeup_watermark = 1;

	return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
}

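/*
 * Print one bpf_prog_info entry, and its sub programs if there are several,
 * when dumping the perf.data header.
 */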
void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
				    struct perf_env *env,
				    FILE *fp)
{
	__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
	__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
	char name[KSYM_NAME_LEN];
	struct btf *btf = NULL;
	u32 sub_prog_cnt, i;

	sub_prog_cnt = info->nr_jited_ksyms;
	if (sub_prog_cnt != info->nr_prog_tags ||
	    sub_prog_cnt != info->nr_jited_func_lens)
		return;

	if (info->btf_id) {
		struct btf_node *node;

		node = perf_env__find_btf(env, info->btf_id);
		if (node)
			btf = btf__new((__u8 *)(node->data),
				       node->data_size);
	}

	if (sub_prog_cnt == 1) {
		synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
		fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
			info->id, name, prog_addrs[0], prog_lens[0]);
		goto out;
	}

	fprintf(fp, "# bpf_prog_info %u:\n", info->id);
	for (i = 0; i < sub_prog_cnt; i++) {
		synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);

		fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
			i, name, prog_addrs[i], prog_lens[i]);
	}
out:
	btf__free(btf);
}