Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC		0xcafe4a11
#endif

#define BPF_INSN_SZ (sizeof(struct bpf_insn))

/* vfprintf() in __base_pr() uses a nonliteral format string. It may break
 * compilation if the user enables the corresponding warning. Disable it
 * explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b)	__attribute__((format(printf, a, b)))

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn = __libbpf_pr;

	__libbpf_pr = fn;
	return old_print_fn;
}

__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}
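/* Usage sketch (editor's addition, not part of libbpf.c): applications route
 * libbpf's log output through their own logger by installing a callback with
 * libbpf_set_print(). The callback and the 'verbose' flag below are
 * hypothetical names, shown only to illustrate the API above.
 */
#if 0	/* example only, excluded from the build */
static bool verbose;

static int my_libbpf_print(enum libbpf_print_level level, const char *format,
			   va_list args)
{
	if (level == LIBBPF_DEBUG && !verbose)
		return 0;
	return vfprintf(stderr, format, args);
}

static void install_logger(void)
{
	/* returns the previously installed callback (__base_pr by default) */
	libbpf_print_fn_t old_fn = libbpf_set_print(my_libbpf_print);

	(void)old_fn;	/* keep it if the old behaviour must be restored later */
}
#endif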

static void pr_perm_msg(int err)
{
	struct rlimit limit;
	char buf[100];

	if (err != -EPERM || geteuid() != 0)
		return;

	err = getrlimit(RLIMIT_MEMLOCK, &limit);
	if (err)
		return;

	if (limit.rlim_cur == RLIM_INFINITY)
		return;

	if (limit.rlim_cur < 1024)
		snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
	else if (limit.rlim_cur < 1024*1024)
		snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
	else
		snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));

	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
		buf);
}
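/* Usage sketch (editor's addition, not part of libbpf.c): on this 5.10-based
 * tree BPF memory is still charged against RLIMIT_MEMLOCK, so loaders
 * commonly raise that limit before creating maps or loading programs, which
 * is exactly what the pr_perm_msg() hint above is about. Minimal sketch
 * (sys/resource.h is already included at the top of this file):
 */
#if 0	/* example only, excluded from the build */
static int bump_memlock_rlimit(void)
{
	struct rlimit rlim = {
		.rlim_cur = RLIM_INFINITY,
		.rlim_max = RLIM_INFINITY,
	};

	/* needs root/CAP_SYS_RESOURCE; shell equivalent: 'ulimit -l unlimited' */
	return setrlimit(RLIMIT_MEMLOCK, &rlim);
}
#endif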

#define STRERR_BUFSIZE  128

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif
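/* Editor's note: both helpers leave the variable in a "released" state
 * (NULL pointer / -1 fd), so cleanup paths can run them more than once
 * without double-free or double-close. For example:
 */
#if 0	/* example only, excluded from the build */
	/* inside some cleanup function: */
	char *s = strdup("x");
	int fd = open("/dev/null", O_RDONLY);

	zfree(&s);	/* frees s and resets it to NULL */
	zfree(&s);	/* safe: free(NULL) is a no-op */

	zclose(fd);	/* closes fd and resets it to -1 */
	zclose(fd);	/* safe: fd is -1, so close() is skipped */
#endif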

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

enum kern_feature_id {
	/* v4.14: kernel support for program & map names. */
	FEAT_PROG_NAME,
	/* v5.2: kernel support for global data sections. */
	FEAT_GLOBAL_DATA,
	/* BTF support */
	FEAT_BTF,
	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
	FEAT_BTF_FUNC,
	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
	FEAT_BTF_DATASEC,
	/* BTF_FUNC_GLOBAL is supported */
	FEAT_BTF_GLOBAL_FUNC,
	/* BPF_F_MMAPABLE is supported for arrays */
	FEAT_ARRAY_MMAP,
	/* kernel support for expected_attach_type in BPF_PROG_LOAD */
	FEAT_EXP_ATTACH_TYPE,
	/* bpf_probe_read_{kernel,user}[_str] helpers */
	FEAT_PROBE_READ_KERN,
	/* BPF_PROG_BIND_MAP is supported */
	FEAT_PROG_BIND_MAP,
	__FEAT_CNT,
};

static bool kernel_supports(enum kern_feature_id feat_id);

enum reloc_type {
	RELO_LD64,
	RELO_CALL,
	RELO_DATA,
	RELO_EXTERN,
};

struct reloc_desc {
	enum reloc_type type;
	int insn_idx;
	int map_idx;
	int sym_off;
	bool processed;
};

struct bpf_sec_def;

typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
					struct bpf_program *prog);

struct bpf_sec_def {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	bool is_exp_attach_type_optional;
	bool is_attachable;
	bool is_attach_btf;
	bool is_sleepable;
	attach_fn_t attach_fn;
};
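/* Editor's note: each bpf_sec_def entry maps an ELF section-name prefix to a
 * program type, expected attach type and (optionally) an auto-attach handler.
 * The entry below is a hypothetical illustration of the shape of one entry,
 * not one of the real table entries defined later in this file.
 */
#if 0	/* example only, excluded from the build */
static const struct bpf_sec_def example_sec_def = {
	.sec = "tracepoint/",
	.len = sizeof("tracepoint/") - 1,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.expected_attach_type = 0,
	.is_exp_attach_type_optional = false,
	.is_attachable = true,		/* can be auto-attached */
	.attach_fn = NULL,		/* real entries wire up an attach handler */
};
#endif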

/*
 * bpf_prog would be a better name, but it is already used in
 * linux/filter.h.
 */
struct bpf_program {
	const struct bpf_sec_def *sec_def;
	char *sec_name;
	size_t sec_idx;
	/* this program's instruction offset (in number of instructions)
	 * within its containing ELF section
	 */
	size_t sec_insn_off;
	/* number of original instructions in ELF section belonging to this
	 * program, not taking into account subprogram instructions possibly
	 * appended later during relocation
	 */
	size_t sec_insn_cnt;
	/* Offset (in number of instructions) of the start of instructions
	 * belonging to this BPF program within its containing main BPF
	 * program. For the entry-point (main) BPF program, this is always
	 * zero. For a sub-program, this gets reset before each of the main
	 * BPF programs is processed and relocated, and is used to determine
	 * whether the sub-program was already appended to the main program
	 * and, if so, at which instruction offset.
	 */
	size_t sub_insn_off;

	char *name;
	/* sec_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;

	/* instructions that belong to BPF program; insns[0] is located at
	 * sec_insn_off instruction within its ELF section in ELF file, so
	 * when mapping ELF file instruction index to the local instruction,
	 * one needs to subtract sec_insn_off; and vice versa.
	 */
	struct bpf_insn *insns;
	/* actual number of instructions in this BPF program's image; for
	 * entry-point BPF programs this includes the size of the main
	 * program itself plus all the used sub-programs, appended at the end
	 */
	size_t insns_cnt;

	struct reloc_desc *reloc_desc;
	int nr_reloc;
	int log_level;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	bool load;
	enum bpf_prog_type type;
	enum bpf_attach_type expected_attach_type;
	int prog_ifindex;
	__u32 attach_btf_id;
	__u32 attach_prog_fd;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};

struct bpf_struct_ops {
	const char *tname;
	const struct btf_type *type;
	struct bpf_program **progs;
	__u32 *kern_func_off;
	/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
	void *data;
	/* e.g. struct bpf_struct_ops_tcp_congestion_ops in
	 *      btf_vmlinux's format.
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[... some other kernel fields ...]
	 *	struct tcp_congestion_ops data;
	 * }
	 * kern_vdata's size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
	 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
	 * from "data".
	 */
	void *kern_vdata;
	__u32 type_id;
};

#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
	LIBBPF_MAP_KCONFIG,
};

static const char * const libbpf_type_to_btf_name[] = {
	[LIBBPF_MAP_DATA]	= DATA_SEC,
	[LIBBPF_MAP_BSS]	= BSS_SEC,
	[LIBBPF_MAP_RODATA]	= RODATA_SEC,
	[LIBBPF_MAP_KCONFIG]	= KCONFIG_SEC,
};
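/* Editor's note: these sections come from global variables in the BPF C code;
 * libbpf turns each one into a single-entry "internal" map of the matching
 * libbpf_map_type. A sketch of BPF-side globals and where they end up
 * (the variable names are made up for illustration):
 */
#if 0	/* example only: BPF program side, not part of libbpf.c */
int pkt_count = 0;		/* zero-initialized -> .bss    -> LIBBPF_MAP_BSS    */
int debug_level = 1;		/* initialized      -> .data   -> LIBBPF_MAP_DATA   */
const int max_rate = 128;	/* read-only        -> .rodata -> LIBBPF_MAP_RODATA */
/* externs declared with __kconfig (from bpf_helpers.h) land in .kconfig and
 * are served from the LIBBPF_MAP_KCONFIG map. */
#endif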

struct bpf_map {
	char *name;
	int fd;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 numa_node;
	__u32 btf_var_idx;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 btf_vmlinux_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
	void *mmaped;
	struct bpf_struct_ops *st_ops;
	struct bpf_map *inner_map;
	void **init_slots;
	int init_slots_sz;
	char *pin_path;
	bool pinned;
	bool reused;
};

enum extern_type {
	EXT_UNKNOWN,
	EXT_KCFG,
	EXT_KSYM,
};

enum kcfg_type {
	KCFG_UNKNOWN,
	KCFG_CHAR,
	KCFG_BOOL,
	KCFG_INT,
	KCFG_TRISTATE,
	KCFG_CHAR_ARR,
};

struct extern_desc {
	enum extern_type type;
	int sym_idx;
	int btf_id;
	int sec_btf_id;
	const char *name;
	bool is_set;
	bool is_weak;
	union {
		struct {
			enum kcfg_type type;
			int sz;
			int align;
			int data_off;
			bool is_signed;
		} kcfg;
		struct {
			unsigned long long addr;

			/* target btf_id of the corresponding kernel var. */
			int vmlinux_btf_id;

			/* local btf_id of the ksym extern's type. */
			__u32 type_id;
		} ksym;
	};
};

static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;

	char *kconfig;
	struct extern_desc *externs;
	int nr_extern;
	int kconfig_map_idx;
	int rodata_map_idx;

	bool loaded;
	bool has_subcalls;

	/*
	 * Information used when doing ELF-related work. Only valid if
	 * efile.fd is valid.
	 */
	struct {
		int fd;
		const void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		Elf_Data *data;
		Elf_Data *rodata;
		Elf_Data *bss;
		Elf_Data *st_ops_data;
		size_t shstrndx; /* section index for section name strings */
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc_sects;
		int nr_reloc_sects;
		int maps_shndx;
		int btf_maps_shndx;
		__u32 btf_maps_sec_btf_id;
		int text_shndx;
		int symbols_shndx;
		int data_shndx;
		int rodata_shndx;
		int bss_shndx;
		int st_ops_shndx;
	} efile;
	/*
	 * All loaded bpf_objects are linked in a list, which is
	 * hidden from the caller. bpf_object__<func> handlers deal
	 * with all objects.
	 */
	struct list_head list;

	struct btf *btf;
	/* Parse and load BTF vmlinux if any of the programs in the object need
	 * it at load time.
	 */
	struct btf *btf_vmlinux;
	struct btf_ext *btf_ext;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	char path[];
};
#define obj_elf_valid(o)	((o)->efile.elf)

static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
			      size_t off, __u32 sym_type, GElf_Sym *sym);

void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warn("Internal error: instances.nr is %d\n",
			prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->sec_name);
	zfree(&prog->pin_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->sec_idx = -1;
}

static char *__bpf_program__pin_name(struct bpf_program *prog)
{
	char *name, *p;

	name = p = strdup(prog->sec_name);
	while ((p = strchr(p, '/')))
		*p = '_';

	return name;
}
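/* Editor's note: this only rewrites '/' to '_', so e.g. a program in section
 * "cgroup/skb" gets pin_name "cgroup_skb", and bpf_object__pin_programs()
 * can create it as a flat entry such as /sys/fs/bpf/<dir>/cgroup_skb.
 */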

static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       BPF_OP(insn->code) == BPF_CALL &&
	       BPF_SRC(insn->code) == BPF_K &&
	       insn->src_reg == BPF_PSEUDO_CALL &&
	       insn->dst_reg == 0 &&
	       insn->off == 0;
}
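/* Editor's note: a call to another BPF function in the same object is encoded
 * as a BPF_CALL whose src_reg is BPF_PSEUDO_CALL and whose imm holds an
 * instruction-relative target, i.e. the shape produced by the BPF_CALL_REL()
 * initializer in linux/filter.h:
 *
 *	.code    = BPF_JMP | BPF_CALL,
 *	.src_reg = BPF_PSEUDO_CALL,
 *	.imm     = <insn-relative offset, patched during relocation>
 *
 * which is exactly the pattern insn_is_subprog_call() matches above.
 */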

static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
		      const char *name, size_t sec_idx, const char *sec_name,
		      size_t sec_off, void *insn_data, size_t insn_data_sz)
{
	if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
		pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
			sec_name, name, sec_off, insn_data_sz);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));
	prog->obj = obj;

	prog->sec_idx = sec_idx;
	prog->sec_insn_off = sec_off / BPF_INSN_SZ;
	prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
	/* insns_cnt can later be increased by appending used subprograms */
	prog->insns_cnt = prog->sec_insn_cnt;

	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->load = true;

	prog->instances.fds = NULL;
	prog->instances.nr = -1;

	prog->sec_name = strdup(sec_name);
	if (!prog->sec_name)
		goto errout;

	prog->name = strdup(name);
	if (!prog->name)
		goto errout;

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name)
		goto errout;

	prog->insns = malloc(insn_data_sz);
	if (!prog->insns)
		goto errout;
	memcpy(prog->insns, insn_data, insn_data_sz);

	return 0;
errout:
	pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
			 const char *sec_name, int sec_idx)
{
	struct bpf_program *prog, *progs;
	void *data = sec_data->d_buf;
	size_t sec_sz = sec_data->d_size, sec_off, prog_sz;
	int nr_progs, err;
	const char *name;
	GElf_Sym sym;

	progs = obj->programs;
	nr_progs = obj->nr_programs;
	sec_off = 0;

	while (sec_off < sec_sz) {
		if (elf_sym_by_sec_off(obj, sec_idx, sec_off, STT_FUNC, &sym)) {
			pr_warn("sec '%s': failed to find program symbol at offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		prog_sz = sym.st_size;

		name = elf_sym_str(obj, sym.st_name);
		if (!name) {
			pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_off + prog_sz > sec_sz) {
			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);

		progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
		if (!progs) {
			/*
			 * In this case the original obj->programs
			 * is still valid, so it doesn't need special
			 * treatment in bpf_object__close().
			 */
			pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
				sec_name, name);
			return -ENOMEM;
		}
		obj->programs = progs;

		prog = &progs[nr_progs];

		err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
					    sec_off, data + sec_off, prog_sz);
		if (err)
			return err;

		nr_progs++;
		obj->nr_programs = nr_progs;

		sec_off += prog_sz;
	}

	return 0;
}

static __u32 get_kernel_version(void)
{
	__u32 major, minor, patch;
	struct utsname info;

	uname(&info);
	if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
		return 0;
	return KERNEL_VERSION(major, minor, patch);
}
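/* Worked example (editor's addition): KERNEL_VERSION() packs the release as
 * (major << 16) + (minor << 8) + patch (with patch clamped to 255 on newer
 * kernels), so for the 5.10.110 release this tree is based on:
 *
 *	KERNEL_VERSION(5, 10, 110) = (5 << 16) + (10 << 8) + 110
 *	                           = 327680 + 2560 + 110 = 330350
 *
 * The result is stored in obj->kern_version and passed as the kern_version
 * attribute of BPF_PROG_LOAD where the kernel still requires it (kprobe
 * programs on pre-5.0 kernels).
 */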
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   688) static const struct btf_member *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   689) find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   691) 	struct btf_member *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   692) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   694) 	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   695) 		if (btf_member_bit_offset(t, i) == bit_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   696) 			return m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   697) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   699) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   702) static const struct btf_member *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   703) find_member_by_name(const struct btf *btf, const struct btf_type *t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   704) 		    const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   706) 	struct btf_member *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   707) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   709) 	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   710) 		if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   711) 			return m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   712) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   714) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   717) #define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   718) static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   719) 				   const char *name, __u32 kind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   721) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   722) find_struct_ops_kern_types(const struct btf *btf, const char *tname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   723) 			   const struct btf_type **type, __u32 *type_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   724) 			   const struct btf_type **vtype, __u32 *vtype_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   725) 			   const struct btf_member **data_member)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   727) 	const struct btf_type *kern_type, *kern_vtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   728) 	const struct btf_member *kern_data_member;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   729) 	__s32 kern_vtype_id, kern_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   730) 	__u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   732) 	kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   733) 	if (kern_type_id < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   734) 		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   735) 			tname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   736) 		return kern_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   737) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   738) 	kern_type = btf__type_by_id(btf, kern_type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   740) 	/* Find the corresponding "map_value" type that will be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   741) 	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS).  For example,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   742) 	 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   743) 	 * btf_vmlinux.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   744) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   745) 	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   746) 						tname, BTF_KIND_STRUCT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   747) 	if (kern_vtype_id < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   748) 		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   749) 			STRUCT_OPS_VALUE_PREFIX, tname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   750) 		return kern_vtype_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   751) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   752) 	kern_vtype = btf__type_by_id(btf, kern_vtype_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   754) 	/* Find "struct tcp_congestion_ops" from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   755) 	 * struct bpf_struct_ops_tcp_congestion_ops {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   756) 	 *	[ ... ]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   757) 	 *	struct tcp_congestion_ops data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   758) 	 * }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   759) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   760) 	kern_data_member = btf_members(kern_vtype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   761) 	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   762) 		if (kern_data_member->type == kern_type_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   763) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   764) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   765) 	if (i == btf_vlen(kern_vtype)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   766) 		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   767) 			tname, STRUCT_OPS_VALUE_PREFIX, tname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   768) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   769) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   771) 	*type = kern_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   772) 	*type_id = kern_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   773) 	*vtype = kern_vtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   774) 	*vtype_id = kern_vtype_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   775) 	*data_member = kern_data_member;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   777) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   780) static bool bpf_map__is_struct_ops(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   782) 	return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   785) /* Init the map's fields that depend on kern_btf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   786) static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   787) 					 const struct btf *btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   788) 					 const struct btf *kern_btf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   790) 	const struct btf_member *member, *kern_member, *kern_data_member;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   791) 	const struct btf_type *type, *kern_type, *kern_vtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   792) 	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   793) 	struct bpf_struct_ops *st_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   794) 	void *data, *kern_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   795) 	const char *tname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   796) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   798) 	st_ops = map->st_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   799) 	type = st_ops->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   800) 	tname = st_ops->tname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   801) 	err = find_struct_ops_kern_types(kern_btf, tname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   802) 					 &kern_type, &kern_type_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   803) 					 &kern_vtype, &kern_vtype_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   804) 					 &kern_data_member);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   805) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   806) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   808) 	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   809) 		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   811) 	map->def.value_size = kern_vtype->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   812) 	map->btf_vmlinux_value_type_id = kern_vtype_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   814) 	st_ops->kern_vdata = calloc(1, kern_vtype->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   815) 	if (!st_ops->kern_vdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   816) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   818) 	data = st_ops->data;
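	/* BTF member offsets are in bits; divide by 8 to get byte offsets */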
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   819) 	kern_data_off = kern_data_member->offset / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   820) 	kern_data = st_ops->kern_vdata + kern_data_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   822) 	member = btf_members(type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   823) 	for (i = 0; i < btf_vlen(type); i++, member++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   824) 		const struct btf_type *mtype, *kern_mtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   825) 		__u32 mtype_id, kern_mtype_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   826) 		void *mdata, *kern_mdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   827) 		__s64 msize, kern_msize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   828) 		__u32 moff, kern_moff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   829) 		__u32 kern_member_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   830) 		const char *mname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   832) 		mname = btf__name_by_offset(btf, member->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   833) 		kern_member = find_member_by_name(kern_btf, kern_type, mname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   834) 		if (!kern_member) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   835) 			pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   836) 				map->name, mname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   837) 			return -ENOTSUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   838) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   840) 		kern_member_idx = kern_member - btf_members(kern_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   841) 		if (btf_member_bitfield_size(type, i) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   842) 		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   843) 			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   844) 				map->name, mname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   845) 			return -ENOTSUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   846) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   848) 		moff = member->offset / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   849) 		kern_moff = kern_member->offset / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   851) 		mdata = data + moff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   852) 		kern_mdata = kern_data + kern_moff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   854) 		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   855) 		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   856) 						    &kern_mtype_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   857) 		if (BTF_INFO_KIND(mtype->info) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   858) 		    BTF_INFO_KIND(kern_mtype->info)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   859) 			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   860) 				map->name, mname, BTF_INFO_KIND(mtype->info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   861) 				BTF_INFO_KIND(kern_mtype->info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   862) 			return -ENOTSUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   863) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   865) 		if (btf_is_ptr(mtype)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   866) 			struct bpf_program *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   868) 			prog = st_ops->progs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   869) 			if (!prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   870) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   872) 			kern_mtype = skip_mods_and_typedefs(kern_btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   873) 							    kern_mtype->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   874) 							    &kern_mtype_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   876) 			/* mtype->type must be a func_proto which was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   877) 			 * guaranteed in bpf_object__collect_st_ops_relos(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   878) 			 * so only check kern_mtype for func_proto here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   879) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   880) 			if (!btf_is_func_proto(kern_mtype)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   881) 				pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   882) 					map->name, mname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   883) 				return -ENOTSUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   884) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   886) 			prog->attach_btf_id = kern_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   887) 			prog->expected_attach_type = kern_member_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   889) 			st_ops->kern_func_off[i] = kern_data_off + kern_moff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   891) 			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   892) 				 map->name, mname, prog->name, moff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   893) 				 kern_moff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   895) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   896) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   898) 		msize = btf__resolve_size(btf, mtype_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   899) 		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   900) 		if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   901) 			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   902) 				map->name, mname, (ssize_t)msize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   903) 				(ssize_t)kern_msize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   904) 			return -ENOTSUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   905) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   907) 		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   908) 			 map->name, mname, (unsigned int)msize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   909) 			 moff, kern_moff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   910) 		memcpy(kern_mdata, mdata, msize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   911) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   913) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   916) static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   918) 	struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   919) 	size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   920) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   922) 	for (i = 0; i < obj->nr_maps; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   923) 		map = &obj->maps[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   925) 		if (!bpf_map__is_struct_ops(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   926) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   928) 		err = bpf_map__init_kern_struct_ops(map, obj->btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   929) 						    obj->btf_vmlinux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   930) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   931) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   932) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   934) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   937) static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   939) 	const struct btf_type *type, *datasec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   940) 	const struct btf_var_secinfo *vsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   941) 	struct bpf_struct_ops *st_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   942) 	const char *tname, *var_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   943) 	__s32 type_id, datasec_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   944) 	const struct btf *btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   945) 	struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   946) 	__u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   948) 	if (obj->efile.st_ops_shndx == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   949) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   951) 	btf = obj->btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   952) 	datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   953) 					    BTF_KIND_DATASEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   954) 	if (datasec_id < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   955) 		pr_warn("struct_ops init: DATASEC %s not found\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   956) 			STRUCT_OPS_SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   957) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   958) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   960) 	datasec = btf__type_by_id(btf, datasec_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   961) 	vsi = btf_var_secinfos(datasec);
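	/* create one struct_ops map per variable found in the DATASEC */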
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   962) 	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   963) 		type = btf__type_by_id(obj->btf, vsi->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   964) 		var_name = btf__name_by_offset(obj->btf, type->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   966) 		type_id = btf__resolve_type(obj->btf, vsi->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   967) 		if (type_id < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   968) 			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   969) 				vsi->type, STRUCT_OPS_SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   970) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   971) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   973) 		type = btf__type_by_id(obj->btf, type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   974) 		tname = btf__name_by_offset(obj->btf, type->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   975) 		if (!tname[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   976) 			pr_warn("struct_ops init: anonymous type is not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   977) 			return -ENOTSUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   978) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   979) 		if (!btf_is_struct(type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   980) 			pr_warn("struct_ops init: %s is not a struct\n", tname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   981) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   982) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   984) 		map = bpf_object__add_map(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   985) 		if (IS_ERR(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   986) 			return PTR_ERR(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   988) 		map->sec_idx = obj->efile.st_ops_shndx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   989) 		map->sec_offset = vsi->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   990) 		map->name = strdup(var_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   991) 		if (!map->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   992) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   994) 		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   995) 		map->def.key_size = sizeof(int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   996) 		map->def.value_size = type->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   997) 		map->def.max_entries = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   999) 		map->st_ops = calloc(1, sizeof(*map->st_ops));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1000) 		if (!map->st_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1001) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1002) 		st_ops = map->st_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1003) 		st_ops->data = malloc(type->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1004) 		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1005) 		st_ops->kern_func_off = malloc(btf_vlen(type) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1006) 					       sizeof(*st_ops->kern_func_off));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1007) 		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1008) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1010) 		if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1011) 			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1012) 				var_name, STRUCT_OPS_SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1013) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1014) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1016) 		memcpy(st_ops->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1017) 		       obj->efile.st_ops_data->d_buf + vsi->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1018) 		       type->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1019) 		st_ops->tname = tname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1020) 		st_ops->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1021) 		st_ops->type_id = type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1023) 		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1024) 			 tname, type_id, var_name, vsi->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1025) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1027) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1030) static struct bpf_object *bpf_object__new(const char *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1031) 					  const void *obj_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1032) 					  size_t obj_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1033) 					  const char *obj_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1035) 	struct bpf_object *obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1036) 	char *end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1038) 	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1039) 	if (!obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1040) 		pr_warn("alloc memory failed for %s\n", path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1041) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1042) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1044) 	strcpy(obj->path, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1045) 	if (obj_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1046) 		strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1047) 		obj->name[sizeof(obj->name) - 1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1048) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1049) 		/* Using the GNU basename(), which doesn't modify its argument. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1050) 		strncpy(obj->name, basename((void *)path),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1051) 			sizeof(obj->name) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1052) 		end = strchr(obj->name, '.');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1053) 		if (end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1054) 			*end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1055) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1057) 	obj->efile.fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1058) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1059) 	 * Caller of this function should also call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1060) 	 * bpf_object__elf_finish() after data collection to return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1061) 	 * obj_buf to the user. If not, we would need to duplicate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1062) 	 * buffer to avoid the user freeing it before ELF handling is done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1063) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1064) 	obj->efile.obj_buf = obj_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1065) 	obj->efile.obj_buf_sz = obj_buf_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1066) 	obj->efile.maps_shndx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1067) 	obj->efile.btf_maps_shndx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1068) 	obj->efile.data_shndx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1069) 	obj->efile.rodata_shndx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1070) 	obj->efile.bss_shndx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1071) 	obj->efile.st_ops_shndx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1072) 	obj->kconfig_map_idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1073) 	obj->rodata_map_idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1075) 	obj->kern_version = get_kernel_version();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1076) 	obj->loaded = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1078) 	INIT_LIST_HEAD(&obj->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1079) 	list_add(&obj->list, &bpf_objects_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1080) 	return obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1083) static void bpf_object__elf_finish(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1085) 	if (!obj_elf_valid(obj))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1086) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1088) 	if (obj->efile.elf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1089) 		elf_end(obj->efile.elf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1090) 		obj->efile.elf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1091) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1092) 	obj->efile.symbols = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1093) 	obj->efile.data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1094) 	obj->efile.rodata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1095) 	obj->efile.bss = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1096) 	obj->efile.st_ops_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1098) 	zfree(&obj->efile.reloc_sects);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1099) 	obj->efile.nr_reloc_sects = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1100) 	zclose(obj->efile.fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1101) 	obj->efile.obj_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1102) 	obj->efile.obj_buf_sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1105) /* if libelf is old and doesn't support mmap(), fall back to read() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1106) #ifndef ELF_C_READ_MMAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1107) #define ELF_C_READ_MMAP ELF_C_READ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1108) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1110) static int bpf_object__elf_init(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1112) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1113) 	GElf_Ehdr *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1115) 	if (obj_elf_valid(obj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1116) 		pr_warn("elf: init internal error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1117) 		return -LIBBPF_ERRNO__LIBELF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1118) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1120) 	if (obj->efile.obj_buf_sz > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1121) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1122) 		 * obj_buf should have been validated by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1123) 		 * bpf_object__open_buffer().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1124) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1125) 		obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1126) 					    obj->efile.obj_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1127) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1128) 		obj->efile.fd = open(obj->path, O_RDONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1129) 		if (obj->efile.fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1130) 			char errmsg[STRERR_BUFSIZE], *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1132) 			err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1133) 			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1134) 			pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1135) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1136) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1138) 		obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1139) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1141) 	if (!obj->efile.elf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1142) 		pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1143) 		err = -LIBBPF_ERRNO__LIBELF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1144) 		goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1145) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1147) 	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1148) 		pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1149) 		err = -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1150) 		goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1151) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1152) 	ep = &obj->efile.ehdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1154) 	if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1155) 		pr_warn("elf: failed to get section names section index for %s: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1156) 			obj->path, elf_errmsg(-1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1157) 		err = -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1158) 		goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1159) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1161) 	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1162) 	if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1163) 		pr_warn("elf: failed to get section names strings from %s: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1164) 			obj->path, elf_errmsg(-1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1165) 		err = -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1166) 		goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1167) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1169) 	/* Old LLVM versions set e_machine to EM_NONE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1170) 	if (ep->e_type != ET_REL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1171) 	    (ep->e_machine && ep->e_machine != EM_BPF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1172) 		pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1173) 		err = -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1174) 		goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1175) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1177) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1178) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1179) 	bpf_object__elf_finish(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1180) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1183) static int bpf_object__check_endianness(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1185) #if __BYTE_ORDER == __LITTLE_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1186) 	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1187) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1188) #elif __BYTE_ORDER == __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1189) 	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1190) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1191) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1192) # error "Unrecognized __BYTE_ORDER__"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1193) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1194) 	pr_warn("elf: endianness mismatch in %s.\n", obj->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1195) 	return -LIBBPF_ERRNO__ENDIAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1198) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1199) bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1201) 	memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1202) 	pr_debug("license of %s is %s\n", obj->path, obj->license);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1203) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1206) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1207) bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1209) 	__u32 kver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1211) 	if (size != sizeof(kver)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1212) 		pr_warn("invalid kver section in %s\n", obj->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1213) 		return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1214) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1215) 	memcpy(&kver, data, sizeof(kver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1216) 	obj->kern_version = kver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1217) 	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1218) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1221) static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1223) 	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1224) 	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1225) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1226) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1229) int bpf_object__section_size(const struct bpf_object *obj, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1230) 			     __u32 *size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1232) 	int ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1234) 	*size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1235) 	if (!name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1236) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1237) 	} else if (!strcmp(name, DATA_SEC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1238) 		if (obj->efile.data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1239) 			*size = obj->efile.data->d_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1240) 	} else if (!strcmp(name, BSS_SEC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1241) 		if (obj->efile.bss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1242) 			*size = obj->efile.bss->d_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1243) 	} else if (!strcmp(name, RODATA_SEC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1244) 		if (obj->efile.rodata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1245) 			*size = obj->efile.rodata->d_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1246) 	} else if (!strcmp(name, STRUCT_OPS_SEC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1247) 		if (obj->efile.st_ops_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1248) 			*size = obj->efile.st_ops_data->d_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1249) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1250) 		Elf_Scn *scn = elf_sec_by_name(obj, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1251) 		Elf_Data *data = elf_sec_data(obj, scn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1253) 		if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1254) 			ret = 0; /* found it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1255) 			*size = data->d_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1256) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1257) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1259) 	return *size ? 0 : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1262) int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1263) 				__u32 *off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1265) 	Elf_Data *symbols = obj->efile.symbols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1266) 	const char *sname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1267) 	size_t si;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1269) 	if (!name || !off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1270) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1271) 
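	/* scan the symbol table for a global data object with the given name */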
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1272) 	for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1273) 		GElf_Sym sym;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1275) 		if (!gelf_getsym(symbols, si, &sym))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1276) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1277) 		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1278) 		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1279) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1281) 		sname = elf_sym_str(obj, sym.st_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1282) 		if (!sname) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1283) 			pr_warn("failed to get sym name string for var %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1284) 				name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1285) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1286) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1287) 		if (strcmp(name, sname) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1288) 			*off = sym.st_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1289) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1290) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1291) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1293) 	return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1296) static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1298) 	struct bpf_map *new_maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1299) 	size_t new_cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1300) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1302) 	if (obj->nr_maps < obj->maps_cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1303) 		return &obj->maps[obj->nr_maps++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1304) 
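	/* grow the maps array by ~1.5x, with a minimum capacity of 4 */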
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1305) 	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1306) 	new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1307) 	if (!new_maps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1308) 		pr_warn("alloc maps for object failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1309) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1310) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1312) 	obj->maps_cap = new_cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1313) 	obj->maps = new_maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1315) 	/* zero out new maps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1316) 	memset(obj->maps + obj->nr_maps, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1317) 	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1318) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1319) 	 * Fill all fds with -1 so we won't close an incorrect fd (fd=0 is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1320) 	 * stdin) on failure (zclose won't close a negative fd).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1321) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1322) 	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1323) 		obj->maps[i].fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1324) 		obj->maps[i].inner_map_fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1325) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1327) 	return &obj->maps[obj->nr_maps++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1330) static size_t bpf_map_mmap_sz(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1332) 	long page_sz = sysconf(_SC_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1333) 	size_t map_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1334) 
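	/* e.g. (illustrative): value_size 16, max_entries 1, 4 KiB pages:
	 * roundup(16, 8) * 1 == 16, rounded up to a single 4096-byte page
	 */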
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1335) 	map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1336) 	map_sz = roundup(map_sz, page_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1337) 	return map_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1340) static char *internal_map_name(struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1341) 			       enum libbpf_map_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1343) 	char map_name[BPF_OBJ_NAME_LEN], *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1344) 	const char *sfx = libbpf_type_to_btf_name[type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1345) 	int sfx_len = max((size_t)7, strlen(sfx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1346) 	int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1347) 			  strlen(obj->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1349) 	snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1350) 		 sfx_len, libbpf_type_to_btf_name[type]);
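	/* Illustrative example (not in the original source), assuming
	 * BPF_OBJ_NAME_LEN == 16: obj->name "test_object" plus the ".bss"
	 * suffix yields "test_obj.bss" (8-byte prefix + suffix).
	 */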
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1352) 	/* sanitise map name to characters allowed by kernel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1353) 	for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1354) 		if (!isalnum(*p) && *p != '_' && *p != '.')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1355) 			*p = '_';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1357) 	return strdup(map_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1360) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1361) bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1362) 			      int sec_idx, void *data, size_t data_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1364) 	struct bpf_map_def *def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1365) 	struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1366) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1368) 	map = bpf_object__add_map(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1369) 	if (IS_ERR(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1370) 		return PTR_ERR(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1372) 	map->libbpf_type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1373) 	map->sec_idx = sec_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1374) 	map->sec_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1375) 	map->name = internal_map_name(obj, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1376) 	if (!map->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1377) 		pr_warn("failed to alloc map name\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1378) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1379) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1381) 	def = &map->def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1382) 	def->type = BPF_MAP_TYPE_ARRAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1383) 	def->key_size = sizeof(int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1384) 	def->value_size = data_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1385) 	def->max_entries = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1386) 	def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1387) 			 ? BPF_F_RDONLY_PROG : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1388) 	def->map_flags |= BPF_F_MMAPABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1390) 	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1391) 		 map->name, map->sec_idx, map->sec_offset, def->map_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1393) 	map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1394) 			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1395) 	if (map->mmaped == MAP_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1396) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1397) 		map->mmaped = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1398) 		pr_warn("failed to alloc map '%s' content buffer: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1399) 			map->name, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1400) 		zfree(&map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1401) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1402) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1404) 	if (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1405) 		memcpy(map->mmaped, data, data_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1407) 	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1408) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1411) static int bpf_object__init_global_data_maps(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1413) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1415) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1416) 	 * Populate obj->maps with libbpf internal maps.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1417) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1418) 	if (obj->efile.data_shndx >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1419) 		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1420) 						    obj->efile.data_shndx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1421) 						    obj->efile.data->d_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1422) 						    obj->efile.data->d_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1423) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1424) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1425) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1426) 	if (obj->efile.rodata_shndx >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1427) 		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1428) 						    obj->efile.rodata_shndx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1429) 						    obj->efile.rodata->d_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1430) 						    obj->efile.rodata->d_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1431) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1432) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1434) 		obj->rodata_map_idx = obj->nr_maps - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1435) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1436) 	if (obj->efile.bss_shndx >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1437) 		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1438) 						    obj->efile.bss_shndx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1439) 						    NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1440) 						    obj->efile.bss->d_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1441) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1442) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1443) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1444) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1448) static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1449) 					       const void *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1451) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1453) 	for (i = 0; i < obj->nr_extern; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1454) 		if (strcmp(obj->externs[i].name, name) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1455) 			return &obj->externs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1456) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1457) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1460) static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1461) 			      char value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1463) 	switch (ext->kcfg.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1464) 	case KCFG_BOOL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1465) 		if (value == 'm') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1466) 			pr_warn("extern (kcfg) %s=%c should be tristate or char\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1467) 				ext->name, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1468) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1469) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1470) 		*(bool *)ext_val = value == 'y' ? true : false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1471) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1472) 	case KCFG_TRISTATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1473) 		if (value == 'y')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1474) 			*(enum libbpf_tristate *)ext_val = TRI_YES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1475) 		else if (value == 'm')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1476) 			*(enum libbpf_tristate *)ext_val = TRI_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1477) 		else /* value == 'n' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1478) 			*(enum libbpf_tristate *)ext_val = TRI_NO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1479) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1480) 	case KCFG_CHAR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1481) 		*(char *)ext_val = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1482) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1483) 	case KCFG_UNKNOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1484) 	case KCFG_INT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1485) 	case KCFG_CHAR_ARR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1486) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1487) 		pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1488) 			ext->name, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1489) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1490) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1491) 	ext->is_set = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1492) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1495) static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1496) 			      const char *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1498) 	size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1500) 	if (ext->kcfg.type != KCFG_CHAR_ARR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1501) 		pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1502) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1503) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1505) 	len = strlen(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1506) 	if (value[len - 1] != '"') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1507) 		pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1508) 			ext->name, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1509) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1512) 	/* strip quotes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1513) 	len -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1514) 	if (len >= ext->kcfg.sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1515) 			pr_warn("extern (kcfg) '%s': long string config %s (%zu bytes) truncated to %d bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1516) 			ext->name, value, len, ext->kcfg.sz - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1517) 		len = ext->kcfg.sz - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1518) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1519) 	memcpy(ext_val, value + 1, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1520) 	ext_val[len] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1521) 	ext->is_set = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1522) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1525) static int parse_u64(const char *value, __u64 *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1527) 	char *value_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1528) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1530) 	errno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1531) 	*res = strtoull(value, &value_end, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1532) 	if (errno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1533) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1534) 		pr_warn("failed to parse '%s' as integer: %d\n", value, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1535) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1536) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1537) 	if (*value_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1538) 		pr_warn("failed to parse '%s' as integer completely\n", value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1539) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1540) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1541) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1544) static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1546) 	int bit_sz = ext->kcfg.sz * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1548) 	if (ext->kcfg.sz == 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1549) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1551) 	/* Validate that value stored in u64 fits in integer of `ext->sz`
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1552) 	 * bytes size without any loss of information. If the target integer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1553) 	 * is signed, we rely on the following limits of integer type of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1554) 	 * Y bits and subsequent transformation:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1555) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1556) 	 *     -2^(Y-1) <= X           <= 2^(Y-1) - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1557) 	 *            0 <= X + 2^(Y-1) <= 2^Y - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1558) 	 *            0 <= X + 2^(Y-1) <  2^Y
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1559) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1560) 	 *  For unsigned target integer, check that all the (64 - Y) bits are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1561) 	 *  zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1562) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1563) 	if (ext->kcfg.is_signed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1564) 		return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1565) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1566) 		return (v >> bit_sz) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1567) }
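
/*
 * Worked example (illustrative) for a 1-byte target, i.e. bit_sz = 8:
 *
 *     is_signed, v = (__u64)-5 = 0xfffffffffffffffb:
 *         v + (1ULL << 7) wraps to 0x7b = 123 < 256   -> fits in s8
 *     is_signed, v = 200:
 *         200 + 128 = 328 >= 256                      -> does not fit in s8
 *     !is_signed, v = 200:
 *         200 >> 8 == 0                               -> fits in u8
 *     !is_signed, v = 300:
 *         300 >> 8 == 1                               -> does not fit in u8
 */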
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1569) static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1570) 			      __u64 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1572) 	if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1573) 		pr_warn("extern (kcfg) %s=%llu should be integer\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1574) 			ext->name, (unsigned long long)value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1575) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1576) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1577) 	if (!is_kcfg_value_in_range(ext, value)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1578) 		pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1579) 			ext->name, (unsigned long long)value, ext->kcfg.sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1580) 		return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1581) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1582) 	switch (ext->kcfg.sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1583) 	case 1: *(__u8 *)ext_val = value; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1584) 	case 2: *(__u16 *)ext_val = value; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1585) 	case 4: *(__u32 *)ext_val = value; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1586) 	case 8: *(__u64 *)ext_val = value; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1587) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1588) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1590) 	ext->is_set = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1591) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1594) static int bpf_object__process_kconfig_line(struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1595) 					    char *buf, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1597) 	struct extern_desc *ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1598) 	char *sep, *value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1599) 	int len, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1600) 	void *ext_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1601) 	__u64 num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1603) 	if (strncmp(buf, "CONFIG_", 7))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1604) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1606) 	sep = strchr(buf, '=');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1607) 	if (!sep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1608) 		pr_warn("failed to parse '%s': no separator\n", buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1609) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1610) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1612) 	/* Trim ending '\n' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1613) 	len = strlen(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1614) 	if (buf[len - 1] == '\n')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1615) 		buf[len - 1] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1616) 	/* Split on '=' and ensure that a value is present. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1617) 	*sep = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1618) 	if (!sep[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1619) 		*sep = '=';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1620) 		pr_warn("failed to parse '%s': no value\n", buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1621) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1622) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1624) 	ext = find_extern_by_name(obj, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1625) 	if (!ext || ext->is_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1626) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1628) 	ext_val = data + ext->kcfg.data_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1629) 	value = sep + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1631) 	switch (*value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1632) 	case 'y': case 'n': case 'm':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1633) 		err = set_kcfg_value_tri(ext, ext_val, *value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1634) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1635) 	case '"':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1636) 		err = set_kcfg_value_str(ext, ext_val, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1637) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1638) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1639) 		/* assume integer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1640) 		err = parse_u64(value, &num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1641) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1642) 			pr_warn("extern (kcfg) %s=%s should be integer\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1643) 				ext->name, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1644) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1645) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1646) 		err = set_kcfg_value_num(ext, ext_val, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1647) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1648) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1649) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1650) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1651) 	pr_debug("extern (kcfg) %s=%s\n", ext->name, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1652) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1653) }
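
/*
 * Example (illustrative sketch of the BPF-program side that these lines
 * resolve; uses the standard __kconfig attribute from bpf_helpers.h, the
 * array size is made up):
 *
 *     extern unsigned long CONFIG_HZ __kconfig;
 *     extern char CONFIG_BPF_SYSCALL __kconfig;            // 'y'/'n' bool
 *     extern char CONFIG_DEFAULT_HOSTNAME[8] __kconfig;    // quoted string
 *
 * Each "CONFIG_FOO=value" line is matched against such externs by name;
 * unknown or already-set externs are skipped, and the parsed value is
 * written into the .kconfig map data at ext->kcfg.data_off.
 */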
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1655) static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1657) 	char buf[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1658) 	struct utsname uts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1659) 	int len, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1660) 	gzFile file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1662) 	uname(&uts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1663) 	len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1664) 	if (len < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1665) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1666) 	else if (len >= PATH_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1667) 		return -ENAMETOOLONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1669) 	/* gzopen also accepts uncompressed files. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1670) 	file = gzopen(buf, "r");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1671) 	if (!file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1672) 		file = gzopen("/proc/config.gz", "r");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1674) 	if (!file) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1675) 		pr_warn("failed to open system Kconfig\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1676) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1677) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1679) 	while (gzgets(file, buf, sizeof(buf))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1680) 		err = bpf_object__process_kconfig_line(obj, buf, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1681) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1682) 			pr_warn("error parsing system Kconfig line '%s': %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1683) 				buf, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1684) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1685) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1686) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1688) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1689) 	gzclose(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1690) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1691) }
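
/*
 * Example (illustrative): the lines handed to
 * bpf_object__process_kconfig_line() are plain kernel .config entries, e.g.:
 *
 *     CONFIG_BPF=y
 *     CONFIG_HZ=250
 *     CONFIG_DEFAULT_HOSTNAME="(none)"
 *     # CONFIG_DEBUG_INFO_BTF is not set
 *
 * The commented-out "is not set" lines do not start with "CONFIG_" and are
 * therefore silently skipped by the parser.
 */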
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1693) static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1694) 					const char *config, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1696) 	char buf[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1697) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1698) 	FILE *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1700) 	file = fmemopen((void *)config, strlen(config), "r");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1701) 	if (!file) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1702) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1703) 		pr_warn("failed to open in-memory Kconfig: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1704) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1705) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1707) 	while (fgets(buf, sizeof(buf), file)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1708) 		err = bpf_object__process_kconfig_line(obj, buf, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1709) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1710) 			pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1711) 				buf, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1712) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1713) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1714) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1716) 	fclose(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1717) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1718) }
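
/*
 * Example (illustrative sketch; option values are made up): callers reach
 * this path by passing an in-memory config through bpf_object_open_opts:
 *
 *     DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
 *             .kconfig = "CONFIG_HZ=1000\nCONFIG_MY_FEATURE=y\n");
 *     struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", &opts);
 *
 * Each '\n'-terminated line is parsed exactly like a line read from the
 * system Kconfig, so extern values can be overridden or supplied by hand.
 */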
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1720) static int bpf_object__init_kconfig_map(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1722) 	struct extern_desc *last_ext = NULL, *ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1723) 	size_t map_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1724) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1726) 	for (i = 0; i < obj->nr_extern; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1727) 		ext = &obj->externs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1728) 		if (ext->type == EXT_KCFG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1729) 			last_ext = ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1730) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1732) 	if (!last_ext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1733) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1735) 	map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1736) 	err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1737) 					    obj->efile.symbols_shndx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1738) 					    NULL, map_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1739) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1740) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1742) 	obj->kconfig_map_idx = obj->nr_maps - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1744) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1745) }
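
/*
 * Worked example (illustrative; names, sizes and offsets are made up): with
 * three KCFG externs whose data offsets were assigned in array order,
 *
 *     CONFIG_A   sz = 8   data_off = 0
 *     CONFIG_B   sz = 4   data_off = 8
 *     CONFIG_C   sz = 2   data_off = 12
 *
 * last_ext is CONFIG_C, so map_sz = 12 + 2 = 14 and the internal .kconfig
 * array map is created with a single 14-byte value.
 */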
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1747) static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1749) 	Elf_Data *symbols = obj->efile.symbols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1750) 	int i, map_def_sz = 0, nr_maps = 0, nr_syms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1751) 	Elf_Data *data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1752) 	Elf_Scn *scn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1754) 	if (obj->efile.maps_shndx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1755) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1757) 	if (!symbols)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1758) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1761) 	scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1762) 	data = elf_sec_data(obj, scn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1763) 	if (!scn || !data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1764) 		pr_warn("elf: failed to get legacy map definitions for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1765) 			obj->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1766) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1767) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1769) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1770) 	 * Count the number of maps. Each map has a name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1771) 	 * Arrays of maps are not supported: only the first element is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1772) 	 * considered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1773) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1774) 	 * TODO: Detect arrays of maps and report an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1775) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1776) 	nr_syms = symbols->d_size / sizeof(GElf_Sym);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1777) 	for (i = 0; i < nr_syms; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1778) 		GElf_Sym sym;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1780) 		if (!gelf_getsym(symbols, i, &sym))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1781) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1782) 		if (sym.st_shndx != obj->efile.maps_shndx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1783) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1784) 		nr_maps++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1785) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1786) 	/* Assume equally sized map definitions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1787) 	pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1788) 		 nr_maps, data->d_size, obj->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1790) 	if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1791) 		pr_warn("elf: unable to determine legacy map definition size in %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1792) 			obj->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1793) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1794) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1795) 	map_def_sz = data->d_size / nr_maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1797) 	/* Fill obj->maps using data in "maps" section.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1798) 	for (i = 0; i < nr_syms; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1799) 		GElf_Sym sym;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1800) 		const char *map_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1801) 		struct bpf_map_def *def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1802) 		struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1804) 		if (!gelf_getsym(symbols, i, &sym))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1805) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1806) 		if (sym.st_shndx != obj->efile.maps_shndx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1807) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1809) 		map = bpf_object__add_map(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1810) 		if (IS_ERR(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1811) 			return PTR_ERR(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1813) 		map_name = elf_sym_str(obj, sym.st_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1814) 		if (!map_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1815) 			pr_warn("failed to get map #%d name sym string for obj %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1816) 				i, obj->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1817) 			return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1818) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1820) 		map->libbpf_type = LIBBPF_MAP_UNSPEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1821) 		map->sec_idx = sym.st_shndx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1822) 		map->sec_offset = sym.st_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1823) 		pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1824) 			 map_name, map->sec_idx, map->sec_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1825) 		if (sym.st_value + map_def_sz > data->d_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1826) 			pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1827) 				obj->path, map_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1828) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1829) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1831) 		map->name = strdup(map_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1832) 		if (!map->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1833) 			pr_warn("failed to alloc map name\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1834) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1835) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1836) 		pr_debug("map %d is \"%s\"\n", i, map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1837) 		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1838) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1839) 		 * If the definition of the map in the object file fits in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1840) 		 * bpf_map_def, copy it.  Any extra fields in our version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1841) 		 * of bpf_map_def will default to zero as a result of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1842) 		 * calloc above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1843) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1844) 		if (map_def_sz <= sizeof(struct bpf_map_def)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1845) 			memcpy(&map->def, def, map_def_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1846) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1847) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1848) 			 * Here the map structure being read is bigger than what
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1849) 			 * we expect, truncate if the excess bits are all zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1850) 			 * If they are not zero, reject this map as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1851) 			 * incompatible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1852) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1853) 			char *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1855) 			for (b = ((char *)def) + sizeof(struct bpf_map_def);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1856) 			     b < ((char *)def) + map_def_sz; b++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1857) 				if (*b != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1858) 					pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1859) 						obj->path, map_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1860) 					if (strict)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1861) 						return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1862) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1863) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1864) 			memcpy(&map->def, def, sizeof(struct bpf_map_def));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1865) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1866) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1867) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1868) }
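
/*
 * Example (illustrative; map name and sizes are made up): the legacy map
 * definitions parsed above come from BPF-program code of this form, one
 * struct bpf_map_def per symbol in the plain "maps" ELF section:
 *
 *     struct bpf_map_def SEC("maps") my_hash = {
 *             .type        = BPF_MAP_TYPE_HASH,
 *             .key_size    = sizeof(__u32),
 *             .value_size  = sizeof(__u64),
 *             .max_entries = 1024,
 *     };
 *
 * Since every symbol is assumed to describe one such struct, map_def_sz is
 * simply the section size divided by the number of symbols in it.
 */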
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1870) static const struct btf_type *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1871) skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1873) 	const struct btf_type *t = btf__type_by_id(btf, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1875) 	if (res_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1876) 		*res_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1878) 	while (btf_is_mod(t) || btf_is_typedef(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1879) 		if (res_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1880) 			*res_id = t->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1881) 		t = btf__type_by_id(btf, t->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1882) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1884) 	return t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1885) }
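
/*
 * Example (illustrative; my_key_t is a made-up typedef): given BPF-side C
 * such as
 *
 *     typedef __u32 my_key_t;
 *     const volatile my_key_t key;
 *
 * resolving the type of 'key' walks CONST -> VOLATILE -> TYPEDEF and returns
 * the underlying INT type of __u32; *res_id (when provided) ends up holding
 * the ID of that final type.
 */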
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1887) static const struct btf_type *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1888) resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1890) 	const struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1892) 	t = skip_mods_and_typedefs(btf, id, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1893) 	if (!btf_is_ptr(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1894) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1896) 	t = skip_mods_and_typedefs(btf, t->type, res_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1898) 	return btf_is_func_proto(t) ? t : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1899) }
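
/*
 * Example (illustrative; the member name is made up): for a struct member
 * declared as
 *
 *     int (*callback)(void *ctx);
 *
 * resolve_func_ptr() skips modifiers/typedefs, verifies the member is a
 * pointer, and returns the pointed-to FUNC_PROTO; for anything that is not
 * a function pointer it returns NULL.
 */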
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1901) static const char *btf_kind_str(const struct btf_type *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1903) 	switch (btf_kind(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1904) 	case BTF_KIND_UNKN: return "void";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1905) 	case BTF_KIND_INT: return "int";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1906) 	case BTF_KIND_PTR: return "ptr";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1907) 	case BTF_KIND_ARRAY: return "array";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1908) 	case BTF_KIND_STRUCT: return "struct";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1909) 	case BTF_KIND_UNION: return "union";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1910) 	case BTF_KIND_ENUM: return "enum";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1911) 	case BTF_KIND_FWD: return "fwd";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1912) 	case BTF_KIND_TYPEDEF: return "typedef";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1913) 	case BTF_KIND_VOLATILE: return "volatile";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1914) 	case BTF_KIND_CONST: return "const";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1915) 	case BTF_KIND_RESTRICT: return "restrict";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1916) 	case BTF_KIND_FUNC: return "func";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1917) 	case BTF_KIND_FUNC_PROTO: return "func_proto";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1918) 	case BTF_KIND_VAR: return "var";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1919) 	case BTF_KIND_DATASEC: return "datasec";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1920) 	default: return "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1924) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1925)  * Fetch an integer attribute of a BTF map definition. Such attributes are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1926)  * represented using a pointer to an array, in which the dimensionality of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1927)  * array encodes the specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1928)  * encodes the `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1929)  * BTF type definition, while using only sizeof(void *) bytes in the ELF data section.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1930)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1931) static bool get_map_field_int(const char *map_name, const struct btf *btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1932) 			      const struct btf_member *m, __u32 *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1934) 	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1935) 	const char *name = btf__name_by_offset(btf, m->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1936) 	const struct btf_array *arr_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1937) 	const struct btf_type *arr_t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1939) 	if (!btf_is_ptr(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1940) 		pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1941) 			map_name, name, btf_kind_str(t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1942) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1943) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1945) 	arr_t = btf__type_by_id(btf, t->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1946) 	if (!arr_t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1947) 		pr_warn("map '%s': attr '%s': type [%u] not found.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1948) 			map_name, name, t->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1949) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1951) 	if (!btf_is_array(arr_t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1952) 		pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1953) 			map_name, name, btf_kind_str(arr_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1954) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1955) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1956) 	arr_info = btf_array(arr_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1957) 	*res = arr_info->nelems;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1958) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1959) }
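
/*
 * Example (illustrative; __uint() is the standard helper from bpf_helpers.h):
 * the pointer-to-array encoding described above is exactly what the
 * declarative map macros expand to, e.g.
 *
 *     __uint(type, BPF_MAP_TYPE_ARRAY);   // int (*type)[BPF_MAP_TYPE_ARRAY];
 *     __uint(max_entries, 4096);          // int (*max_entries)[4096];
 *
 * so get_map_field_int() recovers the integer by following the member's
 * PTR -> ARRAY chain and reading the array's nelems.
 */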
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1961) static int build_map_pin_path(struct bpf_map *map, const char *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1963) 	char buf[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1964) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1966) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1967) 		path = "/sys/fs/bpf";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1969) 	len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1970) 	if (len < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1971) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1972) 	else if (len >= PATH_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1973) 		return -ENAMETOOLONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1975) 	return bpf_map__set_pin_path(map, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1976) }
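
/*
 * Example (illustrative; "ports" and "myapp" are made-up names): for a map
 * named "ports" with no explicit pin_root_path the generated pin path is
 * "/sys/fs/bpf/ports"; with pin_root_path = "/sys/fs/bpf/myapp" it becomes
 * "/sys/fs/bpf/myapp/ports".
 */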
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1979) static int parse_btf_map_def(struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1980) 			     struct bpf_map *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1981) 			     const struct btf_type *def,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1982) 			     bool strict, bool is_inner,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1983) 			     const char *pin_root_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1985) 	const struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1986) 	const struct btf_member *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1987) 	int vlen, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1989) 	vlen = btf_vlen(def);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1990) 	m = btf_members(def);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1991) 	for (i = 0; i < vlen; i++, m++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1992) 		const char *name = btf__name_by_offset(obj->btf, m->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1994) 		if (!name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1995) 			pr_warn("map '%s': invalid field #%d.\n", map->name, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1996) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1997) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1998) 		if (strcmp(name, "type") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1999) 			if (!get_map_field_int(map->name, obj->btf, m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2000) 					       &map->def.type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2001) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2002) 			pr_debug("map '%s': found type = %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2003) 				 map->name, map->def.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2004) 		} else if (strcmp(name, "max_entries") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2005) 			if (!get_map_field_int(map->name, obj->btf, m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2006) 					       &map->def.max_entries))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2007) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2008) 			pr_debug("map '%s': found max_entries = %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2009) 				 map->name, map->def.max_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2010) 		} else if (strcmp(name, "map_flags") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2011) 			if (!get_map_field_int(map->name, obj->btf, m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2012) 					       &map->def.map_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2013) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2014) 			pr_debug("map '%s': found map_flags = %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2015) 				 map->name, map->def.map_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2016) 		} else if (strcmp(name, "numa_node") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2017) 			if (!get_map_field_int(map->name, obj->btf, m, &map->numa_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2018) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2019) 			pr_debug("map '%s': found numa_node = %u.\n", map->name, map->numa_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2020) 		} else if (strcmp(name, "key_size") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2021) 			__u32 sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2023) 			if (!get_map_field_int(map->name, obj->btf, m, &sz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2024) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2025) 			pr_debug("map '%s': found key_size = %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2026) 				 map->name, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2027) 			if (map->def.key_size && map->def.key_size != sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2028) 				pr_warn("map '%s': conflicting key size %u != %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2029) 					map->name, map->def.key_size, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2030) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2031) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2032) 			map->def.key_size = sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2033) 		} else if (strcmp(name, "key") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2034) 			__s64 sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2036) 			t = btf__type_by_id(obj->btf, m->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2037) 			if (!t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2038) 				pr_warn("map '%s': key type [%d] not found.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2039) 					map->name, m->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2040) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2041) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2042) 			if (!btf_is_ptr(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2043) 				pr_warn("map '%s': key spec is not PTR: %s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2044) 					map->name, btf_kind_str(t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2045) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2046) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2047) 			sz = btf__resolve_size(obj->btf, t->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2048) 			if (sz < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2049) 				pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2050) 					map->name, t->type, (ssize_t)sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2051) 				return sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2052) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2053) 			pr_debug("map '%s': found key [%u], sz = %zd.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2054) 				 map->name, t->type, (ssize_t)sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2055) 			if (map->def.key_size && map->def.key_size != sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2056) 				pr_warn("map '%s': conflicting key size %u != %zd.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2057) 					map->name, map->def.key_size, (ssize_t)sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2058) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2059) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2060) 			map->def.key_size = sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2061) 			map->btf_key_type_id = t->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2062) 		} else if (strcmp(name, "value_size") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2063) 			__u32 sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2065) 			if (!get_map_field_int(map->name, obj->btf, m, &sz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2066) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2067) 			pr_debug("map '%s': found value_size = %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2068) 				 map->name, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2069) 			if (map->def.value_size && map->def.value_size != sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2070) 				pr_warn("map '%s': conflicting value size %u != %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2071) 					map->name, map->def.value_size, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2072) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2073) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2074) 			map->def.value_size = sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2075) 		} else if (strcmp(name, "value") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2076) 			__s64 sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2078) 			t = btf__type_by_id(obj->btf, m->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2079) 			if (!t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2080) 				pr_warn("map '%s': value type [%d] not found.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2081) 					map->name, m->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2082) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2083) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2084) 			if (!btf_is_ptr(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2085) 				pr_warn("map '%s': value spec is not PTR: %s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2086) 					map->name, btf_kind_str(t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2087) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2088) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2089) 			sz = btf__resolve_size(obj->btf, t->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2090) 			if (sz < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2091) 				pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2092) 					map->name, t->type, (ssize_t)sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2093) 				return sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2094) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2095) 			pr_debug("map '%s': found value [%u], sz = %zd.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2096) 				 map->name, t->type, (ssize_t)sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2097) 			if (map->def.value_size && map->def.value_size != sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2098) 				pr_warn("map '%s': conflicting value size %u != %zd.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2099) 					map->name, map->def.value_size, (ssize_t)sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2100) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2101) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2102) 			map->def.value_size = sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2103) 			map->btf_value_type_id = t->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2104) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2105) 		else if (strcmp(name, "values") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2106) 			int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2108) 			if (is_inner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2109) 				pr_warn("map '%s': multi-level inner maps not supported.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2110) 					map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2111) 				return -ENOTSUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2112) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2113) 			if (i != vlen - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2114) 				pr_warn("map '%s': '%s' member should be last.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2115) 					map->name, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2116) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2117) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2118) 			if (!bpf_map_type__is_map_in_map(map->def.type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2119) 				pr_warn("map '%s': should be map-in-map.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2120) 					map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2121) 				return -ENOTSUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2122) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2123) 			if (map->def.value_size && map->def.value_size != 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2124) 				pr_warn("map '%s': conflicting value size %u != 4.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2125) 					map->name, map->def.value_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2126) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2127) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2128) 			map->def.value_size = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2129) 			t = btf__type_by_id(obj->btf, m->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2130) 			if (!t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2131) 				pr_warn("map '%s': map-in-map inner type [%d] not found.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2132) 					map->name, m->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2133) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2134) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2135) 			if (!btf_is_array(t) || btf_array(t)->nelems) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2136) 				pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2137) 					map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2138) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2139) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2140) 			t = skip_mods_and_typedefs(obj->btf, btf_array(t)->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2141) 						   NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2142) 			if (!btf_is_ptr(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2143) 				pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2144) 					map->name, btf_kind_str(t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2145) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2146) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2147) 			t = skip_mods_and_typedefs(obj->btf, t->type, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2148) 			if (!btf_is_struct(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2149) 				pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2150) 					map->name, btf_kind_str(t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2151) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2152) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2154) 			map->inner_map = calloc(1, sizeof(*map->inner_map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2155) 			if (!map->inner_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2156) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2157) 			map->inner_map->sec_idx = obj->efile.btf_maps_shndx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2158) 			map->inner_map->name = malloc(strlen(map->name) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2159) 						      sizeof(".inner") + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2160) 			if (!map->inner_map->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2161) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2162) 			sprintf(map->inner_map->name, "%s.inner", map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2164) 			err = parse_btf_map_def(obj, map->inner_map, t, strict,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2165) 						true /* is_inner */, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2166) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2167) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2168) 		} else if (strcmp(name, "pinning") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2169) 			__u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2170) 			int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2172) 			if (is_inner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2173) 				pr_debug("map '%s': inner def can't be pinned.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2174) 					 map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2175) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2176) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2177) 			if (!get_map_field_int(map->name, obj->btf, m, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2178) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2179) 			pr_debug("map '%s': found pinning = %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2180) 				 map->name, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2182) 			if (val != LIBBPF_PIN_NONE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2183) 			    val != LIBBPF_PIN_BY_NAME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2184) 				pr_warn("map '%s': invalid pinning value %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2185) 					map->name, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2186) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2187) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2188) 			if (val == LIBBPF_PIN_BY_NAME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2189) 				err = build_map_pin_path(map, pin_root_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2190) 				if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2191) 					pr_warn("map '%s': couldn't build pin path.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2192) 						map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2193) 					return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2194) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2195) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2196) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2197) 			if (strict) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2198) 				pr_warn("map '%s': unknown field '%s'.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2199) 					map->name, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2200) 				return -ENOTSUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2201) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2202) 			pr_debug("map '%s': ignoring unknown field '%s'.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2203) 				 map->name, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2204) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2205) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2207) 	if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2208) 		pr_warn("map '%s': map type isn't specified.\n", map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2209) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2210) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2212) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2213) }
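
/*
 * Example (illustrative; my_map and struct my_value are made up, the macros
 * are the standard __uint()/__type() helpers from bpf_helpers.h): a
 * BTF-defined map such as
 *
 *     struct {
 *             __uint(type, BPF_MAP_TYPE_HASH);
 *             __uint(max_entries, 1024);
 *             __uint(pinning, LIBBPF_PIN_BY_NAME);
 *             __type(key, __u32);
 *             __type(value, struct my_value);
 *     } my_map SEC(".maps");
 *
 * is parsed field by field above: "type", "max_entries" and "pinning" use the
 * array-dimension encoding, while "key"/"value" derive key_size/value_size
 * (and the BTF type IDs) from the pointed-to types.
 */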
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2215) static int bpf_object__init_user_btf_map(struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2216) 					 const struct btf_type *sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2217) 					 int var_idx, int sec_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2218) 					 const Elf_Data *data, bool strict,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2219) 					 const char *pin_root_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2221) 	const struct btf_type *var, *def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2222) 	const struct btf_var_secinfo *vi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2223) 	const struct btf_var *var_extra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2224) 	const char *map_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2225) 	struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2227) 	vi = btf_var_secinfos(sec) + var_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2228) 	var = btf__type_by_id(obj->btf, vi->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2229) 	var_extra = btf_var(var);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2230) 	map_name = btf__name_by_offset(obj->btf, var->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2232) 	if (map_name == NULL || map_name[0] == '\0') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2233) 		pr_warn("map #%d: empty name.\n", var_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2234) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2235) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2236) 	if ((__u64)vi->offset + vi->size > data->d_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2237) 		pr_warn("map '%s' BTF data is corrupted.\n", map_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2238) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2239) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2240) 	if (!btf_is_var(var)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2241) 		pr_warn("map '%s': unexpected var kind %s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2242) 			map_name, btf_kind_str(var));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2243) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2244) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2245) 	if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2246) 	    var_extra->linkage != BTF_VAR_STATIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2247) 		pr_warn("map '%s': unsupported var linkage %u.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2248) 			map_name, var_extra->linkage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2249) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2250) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2252) 	def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2253) 	if (!btf_is_struct(def)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2254) 		pr_warn("map '%s': unexpected def kind %s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2255) 			map_name, btf_kind_str(var));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2256) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2257) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2258) 	if (def->size > vi->size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2259) 		pr_warn("map '%s': invalid def size.\n", map_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2260) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2261) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2263) 	map = bpf_object__add_map(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2264) 	if (IS_ERR(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2265) 		return PTR_ERR(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2266) 	map->name = strdup(map_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2267) 	if (!map->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2268) 		pr_warn("map '%s': failed to alloc map name.\n", map_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2269) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2270) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2271) 	map->libbpf_type = LIBBPF_MAP_UNSPEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2272) 	map->def.type = BPF_MAP_TYPE_UNSPEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2273) 	map->sec_idx = sec_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2274) 	map->sec_offset = vi->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2275) 	map->btf_var_idx = var_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2276) 	pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2277) 		 map_name, map->sec_idx, map->sec_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2279) 	return parse_btf_map_def(obj, map, def, strict, false, pin_root_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2282) static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2283) 					  const char *pin_root_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2285) 	const struct btf_type *sec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2286) 	int nr_types, i, vlen, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2287) 	const struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2288) 	const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2289) 	Elf_Data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2290) 	Elf_Scn *scn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2292) 	if (obj->efile.btf_maps_shndx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2293) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2295) 	scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2296) 	data = elf_sec_data(obj, scn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2297) 	if (!scn || !data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2298) 		pr_warn("elf: failed to get %s map definitions for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2299) 			MAPS_ELF_SEC, obj->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2300) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2301) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2303) 	nr_types = btf__get_nr_types(obj->btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2304) 	for (i = 1; i <= nr_types; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2305) 		t = btf__type_by_id(obj->btf, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2306) 		if (!btf_is_datasec(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2307) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2308) 		name = btf__name_by_offset(obj->btf, t->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2309) 		if (strcmp(name, MAPS_ELF_SEC) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2310) 			sec = t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2311) 			obj->efile.btf_maps_sec_btf_id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2312) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2313) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2314) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2316) 	if (!sec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2317) 		pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2318) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2319) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2321) 	vlen = btf_vlen(sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2322) 	for (i = 0; i < vlen; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2323) 		err = bpf_object__init_user_btf_map(obj, sec, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2324) 						    obj->efile.btf_maps_shndx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2325) 						    data, strict,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2326) 						    pin_root_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2327) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2328) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2329) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2331) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2332) }
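/*
 * For illustration: a minimal sketch of the BPF C declaration that produces
 * one entry in the ".maps" DATASEC walked above (macro names follow the usual
 * bpf_helpers.h conventions; the map name and sizes are arbitrary):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} example_hash SEC(".maps");
 *
 * Each such variable appears as one btf_var_secinfo in the DATASEC, and
 * bpf_object__init_user_btf_map() above is invoked once per variable.
 */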
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2334) static int bpf_object__init_maps(struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2335) 				 const struct bpf_object_open_opts *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2337) 	const char *pin_root_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2338) 	bool strict;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2339) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2341) 	strict = !OPTS_GET(opts, relaxed_maps, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2342) 	pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2344) 	err = bpf_object__init_user_maps(obj, strict);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2345) 	err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2346) 	err = err ?: bpf_object__init_global_data_maps(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2347) 	err = err ?: bpf_object__init_kconfig_map(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2348) 	err = err ?: bpf_object__init_struct_ops_maps(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2349) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2350) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2352) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2353) }
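/*
 * For illustration, a rough sketch of how the two options consumed above are
 * typically supplied when opening an object.  relaxed_maps loosens strictness
 * of map-definition parsing, and pin_root_path overrides the default
 * /sys/fs/bpf root used for LIBBPF_PIN_BY_NAME maps.  The object file name
 * and pin path below are made up:
 *
 *	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
 *		.relaxed_maps = true,
 *		.pin_root_path = "/sys/fs/bpf/example",
 *	);
 *	struct bpf_object *obj = bpf_object__open_file("example.bpf.o", &opts);
 */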
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2355) static bool section_have_execinstr(struct bpf_object *obj, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2357) 	GElf_Shdr sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2359) 	if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2360) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2362) 	return sh.sh_flags & SHF_EXECINSTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2365) static bool btf_needs_sanitization(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2367) 	bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2368) 	bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2369) 	bool has_func = kernel_supports(FEAT_BTF_FUNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2371) 	return !has_func || !has_datasec || !has_func_global;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2374) static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2376) 	bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2377) 	bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2378) 	bool has_func = kernel_supports(FEAT_BTF_FUNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2379) 	struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2380) 	int i, j, vlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2382) 	for (i = 1; i <= btf__get_nr_types(btf); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2383) 		t = (struct btf_type *)btf__type_by_id(btf, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2385) 		if (!has_datasec && btf_is_var(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2386) 			/* replace VAR with INT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2387) 			t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2388) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2389) 			 * using size = 1 is the safest choice, 4 will be too
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2390) 			 * big and cause kernel BTF validation failure if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2391) 			 * original variable took less than 4 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2392) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2393) 			t->size = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2394) 			*(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2395) 		} else if (!has_datasec && btf_is_datasec(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2396) 			/* replace DATASEC with STRUCT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2397) 			const struct btf_var_secinfo *v = btf_var_secinfos(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2398) 			struct btf_member *m = btf_members(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2399) 			struct btf_type *vt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2400) 			char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2402) 			name = (char *)btf__name_by_offset(btf, t->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2403) 			while (*name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2404) 				if (*name == '.')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2405) 					*name = '_';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2406) 				name++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2407) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2409) 			vlen = btf_vlen(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2410) 			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2411) 			for (j = 0; j < vlen; j++, v++, m++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2412) 				/* order of field assignments is important: m and v overlay the same memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2413) 				m->offset = v->offset * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2414) 				m->type = v->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2415) 				/* preserve variable name as member name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2416) 				vt = (void *)btf__type_by_id(btf, v->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2417) 				m->name_off = vt->name_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2418) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2419) 		} else if (!has_func && btf_is_func_proto(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2420) 			/* replace FUNC_PROTO with ENUM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2421) 			vlen = btf_vlen(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2422) 			t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2423) 			t->size = sizeof(__u32); /* kernel enforced */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2424) 		} else if (!has_func && btf_is_func(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2425) 			/* replace FUNC with TYPEDEF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2426) 			t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2427) 		} else if (!has_func_global && btf_is_func(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2428) 			/* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2429) 			t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2430) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2431) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2432) }
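/*
 * Rough before/after sketch of the DATASEC downgrade above for a hypothetical
 * object with two globals, on a kernel without BTF_KIND_VAR/BTF_KIND_DATASEC
 * support:
 *
 *	before: DATASEC '.data' { VAR 'cnt' offset=0, VAR 'flags' offset=4 }
 *	after:  STRUCT '_data'  { cnt @ bit 0,        flags @ bit 32 }
 *
 * Each VAR itself becomes a 1-byte INT, '.' turns into '_' (a STRUCT name
 * must be a valid identifier, unlike a DATASEC name), and byte offsets become
 * bit offsets.  The rewrite is applied to a clone, so the caller's original
 * BTF stays intact (see bpf_object__sanitize_and_load_btf() below).
 */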
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2434) static bool libbpf_needs_btf(const struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2436) 	return obj->efile.btf_maps_shndx >= 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2437) 	       obj->efile.st_ops_shndx >= 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2438) 	       obj->nr_extern > 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2441) static bool kernel_needs_btf(const struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2443) 	return obj->efile.st_ops_shndx >= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2446) static int bpf_object__init_btf(struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2447) 				Elf_Data *btf_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2448) 				Elf_Data *btf_ext_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2450) 	int err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2452) 	if (btf_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2453) 		obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2454) 		if (IS_ERR(obj->btf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2455) 			err = PTR_ERR(obj->btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2456) 			obj->btf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2457) 			pr_warn("Error loading ELF section %s: %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2458) 				BTF_ELF_SEC, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2459) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2460) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2461) 		/* enforce 8-byte pointers for BPF-targeted BTFs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2462) 		btf__set_pointer_size(obj->btf, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2463) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2464) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2465) 	if (btf_ext_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2466) 		if (!obj->btf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2467) 			pr_debug("Ignoring ELF section %s because ELF section %s, which it depends on, was not found.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2468) 				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2469) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2470) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2471) 		obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2472) 					    btf_ext_data->d_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2473) 		if (IS_ERR(obj->btf_ext)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2474) 			pr_warn("Error loading ELF section %s: %ld. Ignoring it and continuing.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2475) 				BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2476) 			obj->btf_ext = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2477) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2478) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2479) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2480) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2481) 	if (err && libbpf_needs_btf(obj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2482) 		pr_warn("BTF is required, but is missing or corrupted.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2483) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2484) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2485) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2486) }
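/*
 * For reference, the same parsing step in standalone code looks roughly like
 * this (buffer names are placeholders); in this libbpf version btf__new()
 * signals failure via an ERR_PTR-style return, which callers outside libbpf
 * normally unwrap with libbpf_get_error():
 *
 *	struct btf *btf = btf__new(raw_btf_data, raw_btf_size);
 *
 *	if (libbpf_get_error(btf)) {
 *		fprintf(stderr, "failed to parse .BTF data\n");
 *		btf = NULL;
 *	}
 */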
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2488) static int bpf_object__finalize_btf(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2490) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2492) 	if (!obj->btf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2493) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2495) 	err = btf__finalize_data(obj, obj->btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2496) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2497) 		pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2498) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2499) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2501) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2504) static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2506) 	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2507) 	    prog->type == BPF_PROG_TYPE_LSM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2508) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2510) 	/* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2511) 	 * also need vmlinux BTF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2512) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2513) 	if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2514) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2516) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2517) }
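/*
 * Example of a program falling into the "needs vmlinux BTF" bucket: an fentry
 * program (BPF_PROG_TYPE_TRACING) that attaches to a kernel function rather
 * than to another BPF program, sketched with the usual bpf_tracing.h helpers
 * (the target function is just an example):
 *
 *	SEC("fentry/do_unlinkat")
 *	int BPF_PROG(trace_unlink, int dfd, struct filename *name)
 *	{
 *		return 0;
 *	}
 *
 * Resolving "do_unlinkat" to a BTF type id requires the kernel's own BTF,
 * hence the vmlinux BTF load below.
 */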
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2519) static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2521) 	bool need_vmlinux_btf = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2522) 	struct bpf_program *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2523) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2525) 	/* CO-RE relocations need kernel BTF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2526) 	if (obj->btf_ext && obj->btf_ext->core_relo_info.len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2527) 		need_vmlinux_btf = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2529) 	/* Support for typed ksyms needs kernel BTF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2530) 	for (i = 0; i < obj->nr_extern; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2531) 		const struct extern_desc *ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2533) 		ext = &obj->externs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2534) 		if (ext->type == EXT_KSYM && ext->ksym.type_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2535) 			need_vmlinux_btf = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2536) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2537) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2538) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2540) 	bpf_object__for_each_program(prog, obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2541) 		if (!prog->load)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2542) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2543) 		if (libbpf_prog_needs_vmlinux_btf(prog)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2544) 			need_vmlinux_btf = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2545) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2546) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2547) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2549) 	if (!need_vmlinux_btf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2550) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2552) 	obj->btf_vmlinux = libbpf_find_kernel_btf();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2553) 	if (IS_ERR(obj->btf_vmlinux)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2554) 		err = PTR_ERR(obj->btf_vmlinux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2555) 		pr_warn("Error loading vmlinux BTF: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2556) 		obj->btf_vmlinux = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2557) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2558) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2559) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2560) }
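/*
 * libbpf_find_kernel_btf() is also part of the public API, so the same lookup
 * can be done directly; it tries /sys/kernel/btf/vmlinux first and then falls
 * back to well-known vmlinux image locations.  A minimal sketch:
 *
 *	struct btf *vmlinux_btf = libbpf_find_kernel_btf();
 *
 *	if (libbpf_get_error(vmlinux_btf))
 *		fprintf(stderr, "kernel BTF is not available\n");
 */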
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2562) static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2564) 	struct btf *kern_btf = obj->btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2565) 	bool btf_mandatory, sanitize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2566) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2568) 	if (!obj->btf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2569) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2571) 	if (!kernel_supports(FEAT_BTF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2572) 		if (kernel_needs_btf(obj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2573) 			err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2574) 			goto report;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2575) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2576) 		pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2577) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2578) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2580) 	sanitize = btf_needs_sanitization(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2581) 	if (sanitize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2582) 		const void *raw_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2583) 		__u32 sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2585) 		/* clone BTF to sanitize a copy and leave the original intact */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2586) 		raw_data = btf__get_raw_data(obj->btf, &sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2587) 		kern_btf = btf__new(raw_data, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2588) 		if (IS_ERR(kern_btf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2589) 			return PTR_ERR(kern_btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2591) 		/* enforce 8-byte pointers for BPF-targeted BTFs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2592) 		btf__set_pointer_size(obj->btf, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2593) 		bpf_object__sanitize_btf(obj, kern_btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2594) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2596) 	err = btf__load(kern_btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2597) 	if (sanitize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2598) 		if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2599) 			/* move fd to libbpf's BTF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2600) 			btf__set_fd(obj->btf, btf__fd(kern_btf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2601) 			btf__set_fd(kern_btf, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2602) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2603) 		btf__free(kern_btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2604) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2605) report:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2606) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2607) 		btf_mandatory = kernel_needs_btf(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2608) 		pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2609) 			btf_mandatory ? "BTF is mandatory, can't proceed."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2610) 				      : "BTF is optional, ignoring.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2611) 		if (!btf_mandatory)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2612) 			err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2613) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2614) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2617) static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2619) 	const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2621) 	name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2622) 	if (!name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2623) 		pr_warn("elf: failed to get symbol name string at offset %zu from %s: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2624) 			off, obj->path, elf_errmsg(-1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2625) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2626) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2628) 	return name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2631) static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2633) 	const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2635) 	name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2636) 	if (!name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2637) 		pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2638) 			off, obj->path, elf_errmsg(-1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2639) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2640) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2642) 	return name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2645) static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2647) 	Elf_Scn *scn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2649) 	scn = elf_getscn(obj->efile.elf, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2650) 	if (!scn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2651) 		pr_warn("elf: failed to get section(%zu) from %s: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2652) 			idx, obj->path, elf_errmsg(-1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2653) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2654) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2655) 	return scn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2658) static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2660) 	Elf_Scn *scn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2661) 	Elf *elf = obj->efile.elf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2662) 	const char *sec_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2664) 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2665) 		sec_name = elf_sec_name(obj, scn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2666) 		if (!sec_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2667) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2669) 		if (strcmp(sec_name, name) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2670) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2672) 		return scn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2673) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2674) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2675) }
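/*
 * The elf_sec_*() helpers above are thin wrappers around the usual libelf
 * idiom.  Outside of bpf_object, locating a section by name looks roughly
 * like this ("path" and "want" are placeholders; error handling and cleanup
 * are omitted):
 *
 *	int fd = open(path, O_RDONLY);
 *	size_t shstrndx;
 *	Elf_Scn *scn = NULL;
 *	GElf_Shdr sh;
 *	Elf *elf;
 *
 *	elf_version(EV_CURRENT);
 *	elf = elf_begin(fd, ELF_C_READ, NULL);
 *	elf_getshdrstrndx(elf, &shstrndx);
 *	while ((scn = elf_nextscn(elf, scn)) != NULL) {
 *		if (!gelf_getshdr(scn, &sh))
 *			continue;
 *		if (strcmp(elf_strptr(elf, shstrndx, sh.sh_name) ?: "", want) == 0)
 *			break;
 *	}
 *
 * On exit scn points at the wanted section (or is NULL), and its payload can
 * be fetched with elf_getdata(scn, NULL), much like elf_sec_data() above.
 */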
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2677) static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2679) 	if (!scn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2680) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2682) 	if (gelf_getshdr(scn, hdr) != hdr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2683) 		pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2684) 			elf_ndxscn(scn), obj->path, elf_errmsg(-1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2685) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2686) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2688) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2691) static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2693) 	const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2694) 	GElf_Shdr sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2696) 	if (!scn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2697) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2699) 	if (elf_sec_hdr(obj, scn, &sh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2700) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2702) 	name = elf_sec_str(obj, sh.sh_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2703) 	if (!name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2704) 		pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2705) 			elf_ndxscn(scn), obj->path, elf_errmsg(-1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2706) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2707) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2709) 	return name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2712) static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2714) 	Elf_Data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2716) 	if (!scn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2717) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2719) 	data = elf_getdata(scn, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2720) 	if (!data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2721) 		pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2722) 			elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2723) 			obj->path, elf_errmsg(-1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2724) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2725) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2727) 	return data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2730) static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2731) 			      size_t off, __u32 sym_type, GElf_Sym *sym)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2733) 	Elf_Data *symbols = obj->efile.symbols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2734) 	size_t n = symbols->d_size / sizeof(GElf_Sym);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2735) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2737) 	for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2738) 		if (!gelf_getsym(symbols, i, sym))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2739) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2740) 		if (sym->st_shndx != sec_idx || sym->st_value != off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2741) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2742) 		if (GELF_ST_TYPE(sym->st_info) != sym_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2743) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2744) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2745) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2747) 	return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2750) static bool is_sec_name_dwarf(const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2752) 	/* approximation, but the actual list is too long */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2753) 	return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2756) static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2758) 	/* no special handling of .strtab */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2759) 	if (hdr->sh_type == SHT_STRTAB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2760) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2762) 	/* ignore .llvm_addrsig section as well */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2763) 	if (hdr->sh_type == 0x6FFF4C03 /* SHT_LLVM_ADDRSIG */)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2764) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2766) 	/* no subprograms will lead to an empty .text section, ignore it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2767) 	if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2768) 	    strcmp(name, ".text") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2769) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2771) 	/* DWARF sections */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2772) 	if (is_sec_name_dwarf(name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2773) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2775) 	if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2776) 		name += sizeof(".rel") - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2777) 		/* DWARF section relocations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2778) 		if (is_sec_name_dwarf(name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2779) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2781) 		/* .BTF and .BTF.ext don't need relocations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2782) 		if (strcmp(name, BTF_ELF_SEC) == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2783) 		    strcmp(name, BTF_EXT_ELF_SEC) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2784) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2785) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2787) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2790) static int cmp_progs(const void *_a, const void *_b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2792) 	const struct bpf_program *a = _a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2793) 	const struct bpf_program *b = _b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2795) 	if (a->sec_idx != b->sec_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2796) 		return a->sec_idx < b->sec_idx ? -1 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2798) 	/* sec_insn_off can't be the same within the section */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2799) 	return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2800) }
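/*
 * Note that cmp_progs() never returns 0: two programs cannot start at the
 * same instruction offset within one section, so the resulting order is
 * total.  Once obj->programs is sorted with it, the program that starts at
 * or before a given (sec_idx, insn_idx) pair can be found with a binary
 * search along these lines (a sketch only; the caller still has to verify
 * that the hit actually covers insn_idx):
 *
 *	int l = 0, r = obj->nr_programs - 1, m;
 *	struct bpf_program *prog;
 *
 *	while (l < r) {
 *		m = l + (r - l + 1) / 2;
 *		prog = &obj->programs[m];
 *		if (prog->sec_idx < sec_idx ||
 *		    (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
 *			l = m;
 *		else
 *			r = m - 1;
 *	}
 *	prog = &obj->programs[l];
 */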
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2802) static int bpf_object__elf_collect(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2804) 	Elf *elf = obj->efile.elf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2805) 	Elf_Data *btf_ext_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2806) 	Elf_Data *btf_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2807) 	int idx = 0, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2808) 	const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2809) 	Elf_Data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2810) 	Elf_Scn *scn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2811) 	GElf_Shdr sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2813) 	/* a bunch of ELF parsing functionality depends on processing symbols,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2814) 	 * so do the first pass and find the symbol table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2815) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2816) 	scn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2817) 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2818) 		if (elf_sec_hdr(obj, scn, &sh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2819) 			return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2821) 		if (sh.sh_type == SHT_SYMTAB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2822) 			if (obj->efile.symbols) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2823) 				pr_warn("elf: multiple symbol tables in %s\n", obj->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2824) 				return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2825) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2827) 			data = elf_sec_data(obj, scn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2828) 			if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2829) 				return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2831) 			obj->efile.symbols = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2832) 			obj->efile.symbols_shndx = elf_ndxscn(scn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2833) 			obj->efile.strtabidx = sh.sh_link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2834) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2835) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2837) 	scn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2838) 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2839) 		idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2841) 		if (elf_sec_hdr(obj, scn, &sh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2842) 			return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2844) 		name = elf_sec_str(obj, sh.sh_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2845) 		if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2846) 			return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2848) 		if (ignore_elf_section(&sh, name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2849) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2851) 		data = elf_sec_data(obj, scn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2852) 		if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2853) 			return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2855) 		pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2856) 			 idx, name, (unsigned long)data->d_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2857) 			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2858) 			 (int)sh.sh_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2860) 		if (strcmp(name, "license") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2861) 			err = bpf_object__init_license(obj, data->d_buf, data->d_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2862) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2863) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2864) 		} else if (strcmp(name, "version") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2865) 			err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2866) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2867) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2868) 		} else if (strcmp(name, "maps") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2869) 			obj->efile.maps_shndx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2870) 		} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2871) 			obj->efile.btf_maps_shndx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2872) 		} else if (strcmp(name, BTF_ELF_SEC) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2873) 			btf_data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2874) 		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2875) 			btf_ext_data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2876) 		} else if (sh.sh_type == SHT_SYMTAB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2877) 			/* already processed during the first pass above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2878) 		} else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2879) 			if (sh.sh_flags & SHF_EXECINSTR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2880) 				if (strcmp(name, ".text") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2881) 					obj->efile.text_shndx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2882) 				err = bpf_object__add_programs(obj, data, name, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2883) 				if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2884) 					return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2885) 			} else if (strcmp(name, DATA_SEC) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2886) 				obj->efile.data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2887) 				obj->efile.data_shndx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2888) 			} else if (strcmp(name, RODATA_SEC) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2889) 				obj->efile.rodata = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2890) 				obj->efile.rodata_shndx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2891) 			} else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2892) 				obj->efile.st_ops_data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2893) 				obj->efile.st_ops_shndx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2894) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2895) 				pr_info("elf: skipping unrecognized data section(%d) %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2896) 					idx, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2897) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2898) 		} else if (sh.sh_type == SHT_REL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2899) 			int nr_sects = obj->efile.nr_reloc_sects;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2900) 			void *sects = obj->efile.reloc_sects;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2901) 			int sec = sh.sh_info; /* points to other section */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2903) 			/* Only do relo for section with exec instructions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2904) 			if (!section_have_execinstr(obj, sec) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2905) 			    strcmp(name, ".rel" STRUCT_OPS_SEC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2906) 			    strcmp(name, ".rel" MAPS_ELF_SEC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2907) 				pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2908) 					idx, name, sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2909) 					elf_sec_name(obj, elf_sec_by_idx(obj, sec)) ?: "<?>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2910) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2911) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2913) 			sects = libbpf_reallocarray(sects, nr_sects + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2914) 						    sizeof(*obj->efile.reloc_sects));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2915) 			if (!sects)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2916) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2918) 			obj->efile.reloc_sects = sects;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2919) 			obj->efile.nr_reloc_sects++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2921) 			obj->efile.reloc_sects[nr_sects].shdr = sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2922) 			obj->efile.reloc_sects[nr_sects].data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2923) 		} else if (sh.sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2924) 			obj->efile.bss = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2925) 			obj->efile.bss_shndx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2926) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2927) 			pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2928) 				(size_t)sh.sh_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2929) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2930) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2932) 	if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2933) 		pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2934) 		return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2935) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2937) 	/* sort BPF programs by section index and in-section instruction offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2938) 	 * for faster search */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2939) 	qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2941) 	return bpf_object__init_btf(obj, btf_data, btf_ext_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2942) }
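/*
 * To make the section names matched above concrete, a rough sketch of a BPF C
 * file and the ELF sections clang emits for it (identifiers and the program
 * section name are arbitrary):
 *
 *	char LICENSE[] SEC("license") = "GPL";	-> "license"
 *	int pkt_count = 1;			-> ".data" (DATA_SEC)
 *	const volatile int debug_on;		-> ".rodata" (RODATA_SEC)
 *	static int hits;			-> ".bss" (BSS_SEC)
 *
 *	SEC("xdp")
 *	int xdp_pass(struct xdp_md *ctx)	-> executable PROGBITS "xdp"
 *	{
 *		return XDP_PASS;
 *	}
 *
 * BTF-defined maps additionally land in ".maps" (MAPS_ELF_SEC), and clang
 * emits ".BTF"/".BTF.ext" plus ".rel*" relocation sections, all of which the
 * loop above files away in obj->efile for the later collection passes.
 */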
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2944) static bool sym_is_extern(const GElf_Sym *sym)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2946) 	int bind = GELF_ST_BIND(sym->st_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2947) 	/* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2948) 	return sym->st_shndx == SHN_UNDEF &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2949) 	       (bind == STB_GLOBAL || bind == STB_WEAK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2950) 	       GELF_ST_TYPE(sym->st_info) == STT_NOTYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2951) }
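/*
 * Such symbols come from BPF C declarations like the following (names are
 * examples; __kconfig and __ksym are the bpf_helpers.h attribute macros):
 *
 *	extern unsigned int CONFIG_HZ __kconfig;
 *	extern const struct rq runqueues __ksym;
 *
 * Both compile down to undefined (SHN_UNDEF) global or weak symbols of type
 * NOTYPE, which is exactly the shape tested for above.
 */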
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2953) static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2955) 	const struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2956) 	const char *var_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2957) 	int i, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2959) 	if (!btf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2960) 		return -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2962) 	n = btf__get_nr_types(btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2963) 	for (i = 1; i <= n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2964) 		t = btf__type_by_id(btf, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2966) 		if (!btf_is_var(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2967) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2969) 		var_name = btf__name_by_offset(btf, t->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2970) 		if (strcmp(var_name, ext_name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2971) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2973) 		if (btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2974) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2976) 		return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2977) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2979) 	return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2982) static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2983) 	const struct btf_var_secinfo *vs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2984) 	const struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2985) 	int i, j, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2987) 	if (!btf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2988) 		return -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2990) 	n = btf__get_nr_types(btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2991) 	for (i = 1; i <= n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2992) 		t = btf__type_by_id(btf, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2994) 		if (!btf_is_datasec(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2995) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2997) 		vs = btf_var_secinfos(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2998) 		for (j = 0; j < btf_vlen(t); j++, vs++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2999) 			if (vs->type == ext_btf_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3000) 				return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3001) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3002) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3004) 	return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3007) static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3008) 				     bool *is_signed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3010) 	const struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3011) 	const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3013) 	t = skip_mods_and_typedefs(btf, id, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3014) 	name = btf__name_by_offset(btf, t->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3016) 	if (is_signed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3017) 		*is_signed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3018) 	switch (btf_kind(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3019) 	case BTF_KIND_INT: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3020) 		int enc = btf_int_encoding(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3022) 		if (enc & BTF_INT_BOOL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3023) 			return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3024) 		if (is_signed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3025) 			*is_signed = enc & BTF_INT_SIGNED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3026) 		if (t->size == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3027) 			return KCFG_CHAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3028) 		if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3029) 			return KCFG_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3030) 		return KCFG_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3031) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3032) 	case BTF_KIND_ENUM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3033) 		if (t->size != 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3034) 			return KCFG_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3035) 		if (strcmp(name, "libbpf_tristate"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3036) 			return KCFG_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3037) 		return KCFG_TRISTATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3038) 	case BTF_KIND_ARRAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3039) 		if (btf_array(t)->nelems == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3040) 			return KCFG_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3041) 		if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3042) 			return KCFG_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3043) 		return KCFG_CHAR_ARR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3044) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3045) 		return KCFG_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3046) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3047) }
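/*
 * Mapping of the recognized kinds back to typical .kconfig extern
 * declarations (the CONFIG_* names are purely illustrative):
 *
 *	extern int CONFIG_SOME_INT __kconfig;			KCFG_INT
 *	extern bool CONFIG_SOME_BOOL __kconfig;			KCFG_BOOL
 *	extern char CONFIG_SOME_CHAR __kconfig;			KCFG_CHAR
 *	extern enum libbpf_tristate CONFIG_SOME_MOD __kconfig;	KCFG_TRISTATE
 *	extern char CONFIG_SOME_STR[64] __kconfig;		KCFG_CHAR_ARR
 *
 * Anything else, e.g. an int whose size is not a power of two or an enum
 * other than libbpf_tristate, is reported as KCFG_UNKNOWN and rejected by the
 * extern collection code.
 */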
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3049) static int cmp_externs(const void *_a, const void *_b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3051) 	const struct extern_desc *a = _a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3052) 	const struct extern_desc *b = _b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3054) 	if (a->type != b->type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3055) 		return a->type < b->type ? -1 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3057) 	if (a->type == EXT_KCFG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3058) 		/* descending order by alignment requirements */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3059) 		if (a->kcfg.align != b->kcfg.align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3060) 			return a->kcfg.align > b->kcfg.align ? -1 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3061) 		/* ascending order by size, within same alignment class */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3062) 		if (a->kcfg.sz != b->kcfg.sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3063) 			return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3064) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3066) 	/* resolve ties by name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3067) 	return strcmp(a->name, b->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3070) static int find_int_btf_id(const struct btf *btf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3072) 	const struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3073) 	int i, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3075) 	n = btf__get_nr_types(btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3076) 	for (i = 1; i <= n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3077) 		t = btf__type_by_id(btf, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3079) 		if (btf_is_int(t) && btf_int_bits(t) == 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3080) 			return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3081) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3083) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3086) static int bpf_object__collect_externs(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3088) 	struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3089) 	const struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3090) 	struct extern_desc *ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3091) 	int i, n, off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3092) 	const char *ext_name, *sec_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3093) 	Elf_Scn *scn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3094) 	GElf_Shdr sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3096) 	if (!obj->efile.symbols)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3097) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3099) 	scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3100) 	if (elf_sec_hdr(obj, scn, &sh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3101) 		return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3103) 	n = sh.sh_size / sh.sh_entsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3104) 	pr_debug("looking for externs among %d symbols...\n", n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3106) 	for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3107) 		GElf_Sym sym;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3109) 		if (!gelf_getsym(obj->efile.symbols, i, &sym))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3110) 			return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3111) 		if (!sym_is_extern(&sym))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3112) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3113) 		ext_name = elf_sym_str(obj, sym.st_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3114) 		if (!ext_name || !ext_name[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3115) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3117) 		ext = obj->externs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3118) 		ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3119) 		if (!ext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3120) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3121) 		obj->externs = ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3122) 		ext = &ext[obj->nr_extern];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3123) 		memset(ext, 0, sizeof(*ext));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3124) 		obj->nr_extern++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3126) 		ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3127) 		if (ext->btf_id <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3128) 			pr_warn("failed to find BTF for extern '%s': %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3129) 				ext_name, ext->btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3130) 			return ext->btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3131) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3132) 		t = btf__type_by_id(obj->btf, ext->btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3133) 		ext->name = btf__name_by_offset(obj->btf, t->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3134) 		ext->sym_idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3135) 		ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3137) 		ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3138) 		if (ext->sec_btf_id <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3139) 			pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3140) 				ext_name, ext->btf_id, ext->sec_btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3141) 			return ext->sec_btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3142) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3143) 		sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3144) 		sec_name = btf__name_by_offset(obj->btf, sec->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3146) 		if (strcmp(sec_name, KCONFIG_SEC) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3147) 			kcfg_sec = sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3148) 			ext->type = EXT_KCFG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3149) 			ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3150) 			if (ext->kcfg.sz <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3151) 				pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3152) 					ext_name, ext->kcfg.sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3153) 				return ext->kcfg.sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3154) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3155) 			ext->kcfg.align = btf__align_of(obj->btf, t->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3156) 			if (ext->kcfg.align <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3157) 				pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3158) 					ext_name, ext->kcfg.align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3159) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3160) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3161) 			ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3162) 						        &ext->kcfg.is_signed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3163) 			if (ext->kcfg.type == KCFG_UNKNOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3164) 				pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3165) 				return -ENOTSUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3166) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3167) 		} else if (strcmp(sec_name, KSYMS_SEC) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3168) 			ksym_sec = sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3169) 			ext->type = EXT_KSYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3170) 			skip_mods_and_typedefs(obj->btf, t->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3171) 					       &ext->ksym.type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3172) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3173) 			pr_warn("unrecognized extern section '%s'\n", sec_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3174) 			return -ENOTSUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3175) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3176) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3177) 	pr_debug("collected %d externs total\n", obj->nr_extern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3179) 	if (!obj->nr_extern)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3180) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3182) 	/* sort externs by type, for kcfg ones also by (align, size, name) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3183) 	qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3185) 	/* for .ksyms section, we need to turn all externs into allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3186) 	 * variables in BTF to pass kernel verification; we do this by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3187) 	 * pretending that each extern is a 4-byte integer variable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3188) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3189) 	if (ksym_sec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3190) 		/* find existing 4-byte integer type in BTF to use for fake
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3191) 		 * extern variables in DATASEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3192) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3193) 		int int_btf_id = find_int_btf_id(obj->btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3195) 		for (i = 0; i < obj->nr_extern; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3196) 			ext = &obj->externs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3197) 			if (ext->type != EXT_KSYM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3198) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3199) 			pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3200) 				 i, ext->sym_idx, ext->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3201) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3203) 		sec = ksym_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3204) 		n = btf_vlen(sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3205) 		for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3206) 			struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3207) 			struct btf_type *vt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3209) 			vt = (void *)btf__type_by_id(obj->btf, vs->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3210) 			ext_name = btf__name_by_offset(obj->btf, vt->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3211) 			ext = find_extern_by_name(obj, ext_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3212) 			if (!ext) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3213) 				pr_warn("failed to find extern definition for BTF var '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3214) 					ext_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3215) 				return -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3216) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3217) 			btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3218) 			vt->type = int_btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3219) 			vs->offset = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3220) 			vs->size = sizeof(int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3221) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3222) 		sec->size = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3223) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3225) 	if (kcfg_sec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3226) 		sec = kcfg_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3227) 		/* for kcfg externs calculate their offsets within a .kconfig map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3228) 		off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3229) 		for (i = 0; i < obj->nr_extern; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3230) 			ext = &obj->externs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3231) 			if (ext->type != EXT_KCFG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3232) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3234) 			ext->kcfg.data_off = roundup(off, ext->kcfg.align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3235) 			off = ext->kcfg.data_off + ext->kcfg.sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3236) 			pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3237) 				 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3238) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3239) 		sec->size = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3240) 		n = btf_vlen(sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3241) 		for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3242) 			struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3244) 			t = btf__type_by_id(obj->btf, vs->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3245) 			ext_name = btf__name_by_offset(obj->btf, t->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3246) 			ext = find_extern_by_name(obj, ext_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3247) 			if (!ext) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3248) 				pr_warn("failed to find extern definition for BTF var '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3249) 					ext_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3250) 				return -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3251) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3252) 			btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3253) 			vs->offset = ext->kcfg.data_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3254) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3255) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3256) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3257) }
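/*
 * Worked example (illustrative; the externs are hypothetical): with the
 * ordering imposed by cmp_externs(), kcfg externs pack densely into the
 * .kconfig map. Given externs of (align=8, sz=8), (align=4, sz=4) and
 * (align=1, sz=1), the loop above computes data_off = 0, 8 and 12 via
 * roundup(), and sec->size ends up as 13.
 */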
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3259) struct bpf_program *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3260) bpf_object__find_program_by_title(const struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3261) 				  const char *title)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3263) 	struct bpf_program *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3265) 	bpf_object__for_each_program(pos, obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3266) 		if (pos->sec_name && !strcmp(pos->sec_name, title))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3267) 			return pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3268) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3269) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3272) static bool prog_is_subprog(const struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3273) 			    const struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3275) 	/* For legacy reasons, libbpf supports entry-point BPF programs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3276) 	 * without SEC() attribute, i.e., those in the .text section. But if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3277) 	 * there are 2 or more such programs in the .text section, they all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3278) 	 * must be subprograms called from entry-point BPF programs in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3279) 	 * designated SEC()'tions, otherwise there is no way to distinguish
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3280) 	 * which of those programs should be loaded vs which are subprograms.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3281) 	 * Similarly, if there is a function/program in .text and at least one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3282) 	 * other BPF program with custom SEC() attribute, then we just assume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3283) 	 * .text programs are subprograms (even if they are not called from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3284) 	 * other programs), because libbpf never explicitly supported mixing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3285) 	 * SEC()-designated BPF programs and .text entry-point BPF programs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3286) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3287) 	return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3288) }
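/*
 * Example (illustrative): an object containing, say, a SEC("xdp") program
 * plus a helper function compiled into .text is treated as one entry-point
 * program and one subprogram (the .text helper), because nr_programs > 1
 * and the helper's sec_idx equals obj->efile.text_shndx.
 */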
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3290) struct bpf_program *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3291) bpf_object__find_program_by_name(const struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3292) 				 const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3294) 	struct bpf_program *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3296) 	bpf_object__for_each_program(prog, obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3297) 		if (prog_is_subprog(obj, prog))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3298) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3299) 		if (!strcmp(prog->name, name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3300) 			return prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3301) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3302) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3305) static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3306) 				      int shndx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3308) 	return shndx == obj->efile.data_shndx ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3309) 	       shndx == obj->efile.bss_shndx ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3310) 	       shndx == obj->efile.rodata_shndx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3313) static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3314) 				      int shndx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3316) 	return shndx == obj->efile.maps_shndx ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3317) 	       shndx == obj->efile.btf_maps_shndx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3320) static enum libbpf_map_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3321) bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3323) 	if (shndx == obj->efile.data_shndx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3324) 		return LIBBPF_MAP_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3325) 	else if (shndx == obj->efile.bss_shndx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3326) 		return LIBBPF_MAP_BSS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3327) 	else if (shndx == obj->efile.rodata_shndx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3328) 		return LIBBPF_MAP_RODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3329) 	else if (shndx == obj->efile.symbols_shndx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3330) 		return LIBBPF_MAP_KCONFIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3331) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3332) 		return LIBBPF_MAP_UNSPEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3335) static int bpf_program__record_reloc(struct bpf_program *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3336) 				     struct reloc_desc *reloc_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3337) 				     __u32 insn_idx, const char *sym_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3338) 				     const GElf_Sym *sym, const GElf_Rel *rel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3340) 	struct bpf_insn *insn = &prog->insns[insn_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3341) 	size_t map_idx, nr_maps = prog->obj->nr_maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3342) 	struct bpf_object *obj = prog->obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3343) 	__u32 shdr_idx = sym->st_shndx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3344) 	enum libbpf_map_type type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3345) 	const char *sym_sec_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3346) 	struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3348) 	reloc_desc->processed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3350) 	/* sub-program call relocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3351) 	if (insn->code == (BPF_JMP | BPF_CALL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3352) 		if (insn->src_reg != BPF_PSEUDO_CALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3353) 			pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3354) 			return -LIBBPF_ERRNO__RELOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3355) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3356) 		/* text_shndx can be 0, if no default "main" program exists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3357) 		if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3358) 			sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3359) 			pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3360) 				prog->name, sym_name, sym_sec_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3361) 			return -LIBBPF_ERRNO__RELOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3362) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3363) 		if (sym->st_value % BPF_INSN_SZ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3364) 			pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3365) 				prog->name, sym_name, (size_t)sym->st_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3366) 			return -LIBBPF_ERRNO__RELOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3367) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3368) 		reloc_desc->type = RELO_CALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3369) 		reloc_desc->insn_idx = insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3370) 		reloc_desc->sym_off = sym->st_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3371) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3372) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3374) 	if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3375) 		pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3376) 			prog->name, sym_name, insn_idx, insn->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3377) 		return -LIBBPF_ERRNO__RELOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3378) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3380) 	if (sym_is_extern(sym)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3381) 		int sym_idx = GELF_R_SYM(rel->r_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3382) 		int i, n = obj->nr_extern;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3383) 		struct extern_desc *ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3385) 		for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3386) 			ext = &obj->externs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3387) 			if (ext->sym_idx == sym_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3388) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3389) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3390) 		if (i >= n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3391) 			pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3392) 				prog->name, sym_name, sym_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3393) 			return -LIBBPF_ERRNO__RELOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3394) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3395) 		pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3396) 			 prog->name, i, ext->name, ext->sym_idx, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3397) 		reloc_desc->type = RELO_EXTERN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3398) 		reloc_desc->insn_idx = insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3399) 		reloc_desc->sym_off = i; /* sym_off stores extern index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3400) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3401) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3403) 	if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3404) 		pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3405) 			prog->name, sym_name, shdr_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3406) 		return -LIBBPF_ERRNO__RELOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3407) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3409) 	type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3410) 	sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3412) 	/* generic map reference relocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3413) 	if (type == LIBBPF_MAP_UNSPEC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3414) 		if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3415) 			pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3416) 				prog->name, sym_name, sym_sec_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3417) 			return -LIBBPF_ERRNO__RELOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3418) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3419) 		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3420) 			map = &obj->maps[map_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3421) 			if (map->libbpf_type != type ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3422) 			    map->sec_idx != sym->st_shndx ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3423) 			    map->sec_offset != sym->st_value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3424) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3425) 			pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3426) 				 prog->name, map_idx, map->name, map->sec_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3427) 				 map->sec_offset, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3428) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3429) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3430) 		if (map_idx >= nr_maps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3431) 			pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3432) 				prog->name, sym_sec_name, (size_t)sym->st_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3433) 			return -LIBBPF_ERRNO__RELOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3434) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3435) 		reloc_desc->type = RELO_LD64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3436) 		reloc_desc->insn_idx = insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3437) 		reloc_desc->map_idx = map_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3438) 		reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3439) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3440) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3442) 	/* global data map relocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3443) 	if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3444) 		pr_warn("prog '%s': bad data relo against section '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3445) 			prog->name, sym_sec_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3446) 		return -LIBBPF_ERRNO__RELOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3447) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3448) 	for (map_idx = 0; map_idx < nr_maps; map_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3449) 		map = &obj->maps[map_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3450) 		if (map->libbpf_type != type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3451) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3452) 		pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3453) 			 prog->name, map_idx, map->name, map->sec_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3454) 			 map->sec_offset, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3455) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3456) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3457) 	if (map_idx >= nr_maps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3458) 		pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3459) 			prog->name, sym_sec_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3460) 		return -LIBBPF_ERRNO__RELOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3461) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3463) 	reloc_desc->type = RELO_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3464) 	reloc_desc->insn_idx = insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3465) 	reloc_desc->map_idx = map_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3466) 	reloc_desc->sym_off = sym->st_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3467) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3468) }
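/*
 * Summary of the relocation kinds recorded above:
 *   RELO_CALL   - sub-program call; sym_off is the callee's offset in .text
 *   RELO_EXTERN - reference to a kcfg/ksym extern; sym_off is the extern index
 *   RELO_LD64   - reference to a user-defined map; map_idx selects the map
 *   RELO_DATA   - reference into .data/.bss/.rodata; map_idx selects the
 *                 backing map and sym_off is the offset within that section
 */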
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3470) static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3472) 	return insn_idx >= prog->sec_insn_off &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3473) 	       insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3476) static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3477) 						 size_t sec_idx, size_t insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3479) 	int l = 0, r = obj->nr_programs - 1, m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3480) 	struct bpf_program *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3482) 	while (l < r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3483) 		m = l + (r - l + 1) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3484) 		prog = &obj->programs[m];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3486) 		if (prog->sec_idx < sec_idx ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3487) 		    (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3488) 			l = m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3489) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3490) 			r = m - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3491) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3492) 	/* matching program could be at index l, but it still might be the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3493) 	 * wrong one, so we need to double check conditions for the last time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3494) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3495) 	prog = &obj->programs[l];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3496) 	if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3497) 		return prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3498) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3499) }
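/*
 * Note: this is a binary search that relies on obj->programs being ordered
 * by (sec_idx, sec_insn_off); the candidate at index l is re-checked with
 * prog_contains_insn() because insn_idx may fall past the last program's
 * instruction range.
 */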
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3501) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3502) bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3504) 	Elf_Data *symbols = obj->efile.symbols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3505) 	const char *relo_sec_name, *sec_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3506) 	size_t sec_idx = shdr->sh_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3507) 	struct bpf_program *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3508) 	struct reloc_desc *relos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3509) 	int err, i, nrels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3510) 	const char *sym_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3511) 	__u32 insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3512) 	GElf_Sym sym;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3513) 	GElf_Rel rel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3515) 	relo_sec_name = elf_sec_str(obj, shdr->sh_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3516) 	sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3517) 	if (!relo_sec_name || !sec_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3518) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3520) 	pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3521) 		 relo_sec_name, sec_idx, sec_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3522) 	nrels = shdr->sh_size / shdr->sh_entsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3524) 	for (i = 0; i < nrels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3525) 		if (!gelf_getrel(data, i, &rel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3526) 			pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3527) 			return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3528) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3529) 		if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3530) 			pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3531) 				relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3532) 			return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3533) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3534) 		if (rel.r_offset % BPF_INSN_SZ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3535) 			pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3536) 				relo_sec_name, (size_t)rel.r_offset, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3537) 			return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3538) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3540) 		insn_idx = rel.r_offset / BPF_INSN_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3541) 		/* relocations against static functions are recorded as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3542) 		 * relocations against the section that contains a function;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3543) 		 * in such case, symbol will be STT_SECTION and sym.st_name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3544) 		 * will point to empty string (0), so fetch section name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3545) 		 * instead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3546) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3547) 		if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && sym.st_name == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3548) 			sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3549) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3550) 			sym_name = elf_sym_str(obj, sym.st_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3551) 		sym_name = sym_name ?: "<?";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3553) 		pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3554) 			 relo_sec_name, i, insn_idx, sym_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3556) 		prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3557) 		if (!prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3558) 			pr_warn("sec '%s': relo #%d: program not found in section '%s' for insn #%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3559) 				relo_sec_name, i, sec_name, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3560) 			return -LIBBPF_ERRNO__RELOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3561) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3563) 		relos = libbpf_reallocarray(prog->reloc_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3564) 					    prog->nr_reloc + 1, sizeof(*relos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3565) 		if (!relos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3566) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3567) 		prog->reloc_desc = relos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3569) 		/* adjust insn_idx to local BPF program frame of reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3570) 		insn_idx -= prog->sec_insn_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3571) 		err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3572) 						insn_idx, sym_name, &sym, &rel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3573) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3574) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3576) 		prog->nr_reloc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3577) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3578) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3581) static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3583) 	struct bpf_map_def *def = &map->def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3584) 	__u32 key_type_id = 0, value_type_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3585) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3587) 	/* if it's a BTF-defined map, we don't need to search for type IDs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3588) 	 * A struct_ops map does not need btf_key_type_id and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3589) 	 * btf_value_type_id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3590) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3591) 	if (map->sec_idx == obj->efile.btf_maps_shndx ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3592) 	    bpf_map__is_struct_ops(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3593) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3595) 	if (!bpf_map__is_internal(map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3596) 		ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3597) 					   def->value_size, &key_type_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3598) 					   &value_type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3599) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3600) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3601) 		 * LLVM annotates global data differently in BTF, that is,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3602) 		 * only as '.data', '.bss' or '.rodata'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3603) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3604) 		ret = btf__find_by_name(obj->btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3605) 				libbpf_type_to_btf_name[map->libbpf_type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3606) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3607) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3608) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3610) 	map->btf_key_type_id = key_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3611) 	map->btf_value_type_id = bpf_map__is_internal(map) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3612) 				 ret : value_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3613) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3616) static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3618) 	char file[PATH_MAX], buff[4096];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3619) 	FILE *fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3620) 	__u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3621) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3623) 	snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3624) 	memset(info, 0, sizeof(*info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3626) 	fp = fopen(file, "r");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3627) 	if (!fp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3628) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3629) 		pr_warn("failed to open %s: %d. No procfs support?\n", file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3630) 			err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3631) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3632) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3634) 	while (fgets(buff, sizeof(buff), fp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3635) 		if (sscanf(buff, "map_type:\t%u", &val) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3636) 			info->type = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3637) 		else if (sscanf(buff, "key_size:\t%u", &val) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3638) 			info->key_size = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3639) 		else if (sscanf(buff, "value_size:\t%u", &val) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3640) 			info->value_size = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3641) 		else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3642) 			info->max_entries = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3643) 		else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3644) 			info->map_flags = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3645) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3647) 	fclose(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3649) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3650) }
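/*
 * The /proc/<pid>/fdinfo/<fd> entry for a BPF map contains tab-separated
 * lines matching the sscanf() patterns above, roughly (values purely
 * illustrative):
 *
 *   map_type:	2
 *   key_size:	4
 *   value_size:	8
 *   max_entries:	1024
 *   map_flags:	0x0
 *
 * Only these five fields are extracted; anything else in fdinfo is ignored.
 */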
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3652) int bpf_map__reuse_fd(struct bpf_map *map, int fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3654) 	struct bpf_map_info info = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3655) 	__u32 len = sizeof(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3656) 	int new_fd, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3657) 	char *new_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3659) 	err = bpf_obj_get_info_by_fd(fd, &info, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3660) 	if (err && errno == EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3661) 		err = bpf_get_map_info_from_fdinfo(fd, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3662) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3663) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3665) 	new_name = strdup(info.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3666) 	if (!new_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3667) 		return -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3669) 	new_fd = open("/", O_RDONLY | O_CLOEXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3670) 	if (new_fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3671) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3672) 		goto err_free_new_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3673) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3675) 	new_fd = dup3(fd, new_fd, O_CLOEXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3676) 	if (new_fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3677) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3678) 		goto err_close_new_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3679) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3681) 	err = zclose(map->fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3682) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3683) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3684) 		goto err_close_new_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3685) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3686) 	free(map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3688) 	map->fd = new_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3689) 	map->name = new_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3690) 	map->def.type = info.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3691) 	map->def.key_size = info.key_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3692) 	map->def.value_size = info.value_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3693) 	map->def.max_entries = info.max_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3694) 	map->def.map_flags = info.map_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3695) 	map->btf_key_type_id = info.btf_key_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3696) 	map->btf_value_type_id = info.btf_value_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3697) 	map->reused = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3699) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3701) err_close_new_fd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3702) 	close(new_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3703) err_free_new_name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3704) 	free(new_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3705) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3706) }
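/*
 * Usage sketch (illustrative; the pin path and map name are hypothetical):
 *
 *   int pin_fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *   struct bpf_map *m = bpf_object__find_map_by_name(obj, "my_map");
 *
 *   if (pin_fd >= 0 && m && bpf_map__reuse_fd(m, pin_fd) == 0)
 *           ... subsequent bpf_object__load(obj) reuses the pinned map ...
 *
 * A successfully reused map is marked map->reused, so map creation is
 * skipped for it at load time and the existing kernel map is used instead.
 */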
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3708) __u32 bpf_map__max_entries(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3710) 	return map->def.max_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3713) int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3715) 	if (map->fd >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3716) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3717) 	map->def.max_entries = max_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3718) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3721) int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3723) 	if (!map || !max_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3724) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3726) 	return bpf_map__set_max_entries(map, max_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3729) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3730) bpf_object__probe_loading(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3732) 	struct bpf_load_program_attr attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3733) 	char *cp, errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3734) 	struct bpf_insn insns[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3735) 		BPF_MOV64_IMM(BPF_REG_0, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3736) 		BPF_EXIT_INSN(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3737) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3738) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3740) 	/* make sure basic loading works */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3742) 	memset(&attr, 0, sizeof(attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3743) 	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3744) 	attr.insns = insns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3745) 	attr.insns_cnt = ARRAY_SIZE(insns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3746) 	attr.license = "GPL";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3748) 	ret = bpf_load_program_xattr(&attr, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3749) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3750) 		ret = errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3751) 		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3752) 		pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3753) 			"program. Make sure your kernel supports BPF "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3754) 			"(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3755) 			"set to big enough value.\n", __func__, cp, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3756) 		return -ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3757) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3758) 	close(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3760) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3763) static int probe_fd(int fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3765) 	if (fd >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3766) 		close(fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3767) 	return fd >= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3770) static int probe_kern_prog_name(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3772) 	struct bpf_load_program_attr attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3773) 	struct bpf_insn insns[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3774) 		BPF_MOV64_IMM(BPF_REG_0, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3775) 		BPF_EXIT_INSN(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3776) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3777) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3779) 	/* make sure loading with name works */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3781) 	memset(&attr, 0, sizeof(attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3782) 	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3783) 	attr.insns = insns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3784) 	attr.insns_cnt = ARRAY_SIZE(insns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3785) 	attr.license = "GPL";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3786) 	attr.name = "test";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3787) 	ret = bpf_load_program_xattr(&attr, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3788) 	return probe_fd(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3791) static int probe_kern_global_data(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3793) 	struct bpf_load_program_attr prg_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3794) 	struct bpf_create_map_attr map_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3795) 	char *cp, errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3796) 	struct bpf_insn insns[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3797) 		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3798) 		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3799) 		BPF_MOV64_IMM(BPF_REG_0, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3800) 		BPF_EXIT_INSN(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3801) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3802) 	int ret, map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3804) 	memset(&map_attr, 0, sizeof(map_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3805) 	map_attr.map_type = BPF_MAP_TYPE_ARRAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3806) 	map_attr.key_size = sizeof(int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3807) 	map_attr.value_size = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3808) 	map_attr.max_entries = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3810) 	map = bpf_create_map_xattr(&map_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3811) 	if (map < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3812) 		ret = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3813) 		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3814) 		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3815) 			__func__, cp, -ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3816) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3817) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3819) 	insns[0].imm = map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3821) 	memset(&prg_attr, 0, sizeof(prg_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3822) 	prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3823) 	prg_attr.insns = insns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3824) 	prg_attr.insns_cnt = ARRAY_SIZE(insns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3825) 	prg_attr.license = "GPL";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3827) 	ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3828) 	close(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3829) 	return probe_fd(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3832) static int probe_kern_btf(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3834) 	static const char strs[] = "\0int";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3835) 	__u32 types[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3836) 		/* int */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3837) 		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3838) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3840) 	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3841) 					     strs, sizeof(strs)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3842) }
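/*
 * probe_kern_btf() and the probes below hand-encode a minimal BTF blob
 * (type records in types[], the string table in strs[]) and feed it to the
 * kernel via libbpf__load_raw_btf(); probe_fd() then turns the resulting
 * fd (or error) into a 1/0 feature-availability answer.
 */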
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3844) static int probe_kern_btf_func(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3846) 	static const char strs[] = "\0int\0x\0a";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3847) 	/* void x(int a) {} */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3848) 	__u32 types[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3849) 		/* int */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3850) 		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3851) 		/* FUNC_PROTO */                                /* [2] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3852) 		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3853) 		BTF_PARAM_ENC(7, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3854) 		/* FUNC x */                                    /* [3] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3855) 		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3856) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3858) 	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3859) 					     strs, sizeof(strs)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3862) static int probe_kern_btf_func_global(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3864) 	static const char strs[] = "\0int\0x\0a";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3865) 	/* non-static (global) void x(int a) {} */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3866) 	__u32 types[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3867) 		/* int */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3868) 		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3869) 		/* FUNC_PROTO */                                /* [2] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3870) 		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3871) 		BTF_PARAM_ENC(7, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3872) 		/* FUNC x BTF_FUNC_GLOBAL */                    /* [3] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3873) 		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3874) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3876) 	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3877) 					     strs, sizeof(strs)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3880) static int probe_kern_btf_datasec(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3882) 	static const char strs[] = "\0x\0.data";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3883) 	/* static int x; */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3884) 	__u32 types[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3885) 		/* int */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3886) 		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3887) 		/* VAR x */                                     /* [2] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3888) 		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3889) 		BTF_VAR_STATIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3890) 		/* DATASEC val */                               /* [3] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3891) 		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3892) 		BTF_VAR_SECINFO_ENC(2, 0, 4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3893) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3895) 	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3896) 					     strs, sizeof(strs)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3898) 
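/* Check whether the kernel accepts BPF_F_MMAPABLE on an ARRAY map. */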
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3899) static int probe_kern_array_mmap(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3901) 	struct bpf_create_map_attr attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3902) 		.map_type = BPF_MAP_TYPE_ARRAY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3903) 		.map_flags = BPF_F_MMAPABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3904) 		.key_size = sizeof(int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3905) 		.value_size = sizeof(int),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3906) 		.max_entries = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3907) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3909) 	return probe_fd(bpf_create_map_xattr(&attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3912) static int probe_kern_exp_attach_type(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3914) 	struct bpf_load_program_attr attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3915) 	struct bpf_insn insns[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3916) 		BPF_MOV64_IMM(BPF_REG_0, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3917) 		BPF_EXIT_INSN(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3918) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3920) 	memset(&attr, 0, sizeof(attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3921) 	/* use any valid combination of program type and (optional)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3922) 	 * non-zero expected attach type, i.e., not BPF_CGROUP_INET_INGRESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3923) 	 * (which is zero), to see if the kernel supports the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3924) 	 * expected_attach_type field of the BPF_PROG_LOAD command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3925) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3926) 	attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3927) 	attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3928) 	attr.insns = insns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3929) 	attr.insns_cnt = ARRAY_SIZE(insns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3930) 	attr.license = "GPL";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3932) 	return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3934) 
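/*
 * Load a minimal kprobe program that calls bpf_probe_read_kernel() to find
 * out whether that helper is available in the running kernel.
 */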
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3935) static int probe_kern_probe_read_kernel(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3937) 	struct bpf_load_program_attr attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3938) 	struct bpf_insn insns[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3939) 		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),	/* r1 = r10 (fp) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3940) 		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),	/* r1 += -8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3941) 		BPF_MOV64_IMM(BPF_REG_2, 8),		/* r2 = 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3942) 		BPF_MOV64_IMM(BPF_REG_3, 0),		/* r3 = 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3943) 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3944) 		BPF_EXIT_INSN(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3945) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3947) 	memset(&attr, 0, sizeof(attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3948) 	attr.prog_type = BPF_PROG_TYPE_KPROBE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3949) 	attr.insns = insns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3950) 	attr.insns_cnt = ARRAY_SIZE(insns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3951) 	attr.license = "GPL";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3953) 	return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3955) 
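/*
 * Check for BPF_PROG_BIND_MAP support: create a throwaway array map and a
 * trivial socket filter program, then try to bind the map to the program.
 */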
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3956) static int probe_prog_bind_map(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3958) 	struct bpf_load_program_attr prg_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3959) 	struct bpf_create_map_attr map_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3960) 	char *cp, errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3961) 	struct bpf_insn insns[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3962) 		BPF_MOV64_IMM(BPF_REG_0, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3963) 		BPF_EXIT_INSN(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3964) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3965) 	int ret, map, prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3967) 	memset(&map_attr, 0, sizeof(map_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3968) 	map_attr.map_type = BPF_MAP_TYPE_ARRAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3969) 	map_attr.key_size = sizeof(int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3970) 	map_attr.value_size = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3971) 	map_attr.max_entries = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3973) 	map = bpf_create_map_xattr(&map_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3974) 	if (map < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3975) 		ret = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3976) 		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3977) 		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3978) 			__func__, cp, -ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3979) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3980) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3982) 	memset(&prg_attr, 0, sizeof(prg_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3983) 	prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3984) 	prg_attr.insns = insns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3985) 	prg_attr.insns_cnt = ARRAY_SIZE(insns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3986) 	prg_attr.license = "GPL";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3988) 	prog = bpf_load_program_xattr(&prg_attr, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3989) 	if (prog < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3990) 		close(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3991) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3992) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3994) 	ret = bpf_prog_bind_map(prog, map, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3996) 	close(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3997) 	close(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3999) 	return ret >= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4002) enum kern_feature_result {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4003) 	FEAT_UNKNOWN = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4004) 	FEAT_SUPPORTED = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4005) 	FEAT_MISSING = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4006) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4008) typedef int (*feature_probe_fn)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4009) 
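/*
 * Table of kernel feature probes, indexed by enum kern_feature_id. Probes
 * run lazily and their results are cached in 'res' (see kernel_supports()).
 */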
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4010) static struct kern_feature_desc {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4011) 	const char *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4012) 	feature_probe_fn probe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4013) 	enum kern_feature_result res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4014) } feature_probes[__FEAT_CNT] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4015) 	[FEAT_PROG_NAME] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4016) 		"BPF program name", probe_kern_prog_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4017) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4018) 	[FEAT_GLOBAL_DATA] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4019) 		"global variables", probe_kern_global_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4020) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4021) 	[FEAT_BTF] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4022) 		"minimal BTF", probe_kern_btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4023) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4024) 	[FEAT_BTF_FUNC] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4025) 		"BTF functions", probe_kern_btf_func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4026) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4027) 	[FEAT_BTF_GLOBAL_FUNC] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4028) 		"BTF global function", probe_kern_btf_func_global,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4029) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4030) 	[FEAT_BTF_DATASEC] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4031) 		"BTF data section and variable", probe_kern_btf_datasec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4032) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4033) 	[FEAT_ARRAY_MMAP] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4034) 		"ARRAY map mmap()", probe_kern_array_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4035) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4036) 	[FEAT_EXP_ATTACH_TYPE] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4037) 		"BPF_PROG_LOAD expected_attach_type attribute",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4038) 		probe_kern_exp_attach_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4039) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4040) 	[FEAT_PROBE_READ_KERN] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4041) 		"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4042) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4043) 	[FEAT_PROG_BIND_MAP] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4044) 		"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4045) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4046) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4047) 
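/*
 * Run the probe for a feature on first use and cache the result; a probe
 * error is reported and treated the same as the feature being missing.
 */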
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4048) static bool kernel_supports(enum kern_feature_id feat_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4050) 	struct kern_feature_desc *feat = &feature_probes[feat_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4051) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4053) 	if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4054) 		ret = feat->probe();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4055) 		if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4056) 			WRITE_ONCE(feat->res, FEAT_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4057) 		} else if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4058) 			WRITE_ONCE(feat->res, FEAT_MISSING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4059) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4060) 			pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4061) 			WRITE_ONCE(feat->res, FEAT_MISSING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4062) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4063) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4065) 	return READ_ONCE(feat->res) == FEAT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4067) 
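/*
 * A pinned map can only be reused if its type, key/value sizes, max_entries
 * and flags all match the map definition in the BPF object.
 */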
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4068) static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4070) 	struct bpf_map_info map_info = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4071) 	char msg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4072) 	__u32 map_info_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4073) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4075) 	map_info_len = sizeof(map_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4077) 	err = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4078) 	if (err && errno == EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4079) 		err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4080) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4081) 		pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4082) 			libbpf_strerror_r(errno, msg, sizeof(msg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4083) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4084) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4086) 	return (map_info.type == map->def.type &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4087) 		map_info.key_size == map->def.key_size &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4088) 		map_info.value_size == map->def.value_size &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4089) 		map_info.max_entries == map->def.max_entries &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4090) 		map_info.map_flags == map->def.map_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4092) 
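/*
 * Try to reuse the map pinned at map->pin_path. A missing pin is not an
 * error (the map will be created and pinned later); an unreadable or
 * incompatible pin is.
 */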
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4093) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4094) bpf_object__reuse_map(struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4096) 	char *cp, errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4097) 	int err, pin_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4099) 	pin_fd = bpf_obj_get(map->pin_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4100) 	if (pin_fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4101) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4102) 		if (err == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4103) 			pr_debug("found no pinned map to reuse at '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4104) 				 map->pin_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4105) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4106) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4108) 		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4109) 		pr_warn("couldn't retrieve pinned map '%s': %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4110) 			map->pin_path, cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4111) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4112) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4114) 	if (!map_is_reuse_compat(map, pin_fd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4115) 		pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4116) 			map->pin_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4117) 		close(pin_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4118) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4119) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4121) 	err = bpf_map__reuse_fd(map, pin_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4122) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4123) 		close(pin_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4124) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4125) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4126) 	map->pinned = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4127) 	pr_debug("reused pinned map at '%s'\n", map->pin_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4129) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4131) 
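/*
 * Copy the initial contents of an internal map (.data, .rodata, .kconfig,
 * etc.) from its mmap()'ed region into the kernel map, then freeze
 * read-only maps so they cannot be modified from the syscall side.
 */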
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4132) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4133) bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4135) 	enum libbpf_map_type map_type = map->libbpf_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4136) 	char *cp, errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4137) 	int err, zero = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4139) 	err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4140) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4141) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4142) 		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4143) 		pr_warn("Error setting initial map(%s) contents: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4144) 			map->name, cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4145) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4146) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4148) 	/* Freeze .rodata and .kconfig map as read-only from syscall side. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4149) 	if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4150) 		err = bpf_map_freeze(map->fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4151) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4152) 			err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4153) 			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4154) 			pr_warn("Error freezing map(%s) as read-only: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4155) 				map->name, cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4156) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4157) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4158) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4159) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4162) static void bpf_map__destroy(struct bpf_map *map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4163) 
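/*
 * Create a single map from its definition. BTF key/value info is attached
 * when available and dropped on retry if the kernel rejects it. For
 * map-in-map types, a temporary inner map is created first to provide
 * inner_map_fd and is destroyed again after the creation attempt.
 */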
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4164) static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4166) 	struct bpf_create_map_attr create_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4167) 	struct bpf_map_def *def = &map->def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4168) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4170) 	memset(&create_attr, 0, sizeof(create_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4172) 	if (kernel_supports(FEAT_PROG_NAME))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4173) 		create_attr.name = map->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4174) 	create_attr.map_ifindex = map->map_ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4175) 	create_attr.map_type = def->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4176) 	create_attr.map_flags = def->map_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4177) 	create_attr.key_size = def->key_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4178) 	create_attr.value_size = def->value_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4179) 	create_attr.numa_node = map->numa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4181) 	if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4182) 		int nr_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4184) 		nr_cpus = libbpf_num_possible_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4185) 		if (nr_cpus < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4186) 			pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4187) 				map->name, nr_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4188) 			return nr_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4189) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4190) 		pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4191) 		create_attr.max_entries = nr_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4192) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4193) 		create_attr.max_entries = def->max_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4194) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4196) 	if (bpf_map__is_struct_ops(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4197) 		create_attr.btf_vmlinux_value_type_id =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4198) 			map->btf_vmlinux_value_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4200) 	create_attr.btf_fd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4201) 	create_attr.btf_key_type_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4202) 	create_attr.btf_value_type_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4203) 	if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4204) 		create_attr.btf_fd = btf__fd(obj->btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4205) 		create_attr.btf_key_type_id = map->btf_key_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4206) 		create_attr.btf_value_type_id = map->btf_value_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4207) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4209) 	if (bpf_map_type__is_map_in_map(def->type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4210) 		if (map->inner_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4211) 			err = bpf_object__create_map(obj, map->inner_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4212) 			if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4213) 				pr_warn("map '%s': failed to create inner map: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4214) 					map->name, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4215) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4216) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4217) 			map->inner_map_fd = bpf_map__fd(map->inner_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4218) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4219) 		if (map->inner_map_fd >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4220) 			create_attr.inner_map_fd = map->inner_map_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4223) 	map->fd = bpf_create_map_xattr(&create_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4224) 	if (map->fd < 0 && (create_attr.btf_key_type_id ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4225) 			    create_attr.btf_value_type_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4226) 		char *cp, errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4228) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4229) 		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4230) 		pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4231) 			map->name, cp, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4232) 		create_attr.btf_fd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4233) 		create_attr.btf_key_type_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4234) 		create_attr.btf_value_type_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4235) 		map->btf_key_type_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4236) 		map->btf_value_type_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4237) 		map->fd = bpf_create_map_xattr(&create_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4238) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4240) 	err = map->fd < 0 ? -errno : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4242) 	if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4243) 		bpf_map__destroy(map->inner_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4244) 		zfree(&map->inner_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4245) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4247) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4249) 
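/*
 * For declaratively initialized map-in-map slots, write the fds of the
 * already-created inner maps into the outer map's elements.
 */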
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4250) static int init_map_slots(struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4252) 	const struct bpf_map *targ_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4253) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4254) 	int fd, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4256) 	for (i = 0; i < map->init_slots_sz; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4257) 		if (!map->init_slots[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4258) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4260) 		targ_map = map->init_slots[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4261) 		fd = bpf_map__fd(targ_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4262) 		err = bpf_map_update_elem(map->fd, &i, &fd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4263) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4264) 			err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4265) 			pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4266) 				map->name, i, targ_map->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4267) 				fd, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4268) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4269) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4270) 		pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4271) 			 map->name, i, targ_map->name, fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4272) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4274) 	zfree(&map->init_slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4275) 	map->init_slots_sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4277) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4279) 
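/*
 * Create (or reuse) all maps of a BPF object: reuse a compatible pinned map
 * when pin_path is set, otherwise create the map, populate internal maps,
 * initialize map-in-map slots and auto-pin if requested. If auto-pinning
 * fails with -EEXIST, the reuse/create sequence is retried once.
 */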
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4280) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4281) bpf_object__create_maps(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4283) 	struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4284) 	char *cp, errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4285) 	unsigned int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4286) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4287) 	bool retried;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4289) 	for (i = 0; i < obj->nr_maps; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4290) 		map = &obj->maps[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4292) 		retried = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4293) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4294) 		if (map->pin_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4295) 			err = bpf_object__reuse_map(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4296) 			if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4297) 				pr_warn("map '%s': error reusing pinned map\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4298) 					map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4299) 				goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4300) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4301) 			if (retried && map->fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4302) 				pr_warn("map '%s': cannot find pinned map\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4303) 					map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4304) 				err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4305) 				goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4306) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4307) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4309) 		if (map->fd >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4310) 			pr_debug("map '%s': skipping creation (preset fd=%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4311) 				 map->name, map->fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4312) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4313) 			err = bpf_object__create_map(obj, map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4314) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4315) 				goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4317) 			pr_debug("map '%s': created successfully, fd=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4318) 				 map->name, map->fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4320) 			if (bpf_map__is_internal(map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4321) 				err = bpf_object__populate_internal_map(obj, map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4322) 				if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4323) 					zclose(map->fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4324) 					goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4325) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4326) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4328) 			if (map->init_slots_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4329) 				err = init_map_slots(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4330) 				if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4331) 					zclose(map->fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4332) 					goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4333) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4334) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4335) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4337) 		if (map->pin_path && !map->pinned) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4338) 			err = bpf_map__pin(map, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4339) 			if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4340) 				zclose(map->fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4341) 				if (!retried && err == -EEXIST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4342) 					retried = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4343) 					goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4344) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4345) 				pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4346) 					map->name, map->pin_path, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4347) 				goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4348) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4349) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4350) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4352) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4354) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4355) 	cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4356) 	pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4357) 	pr_perm_msg(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4358) 	for (j = 0; j < i; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4359) 		zclose(obj->maps[j].fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4360) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4363) #define BPF_CORE_SPEC_MAX_LEN 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4365) /* represents BPF CO-RE field or array element accessor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4366) struct bpf_core_accessor {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4367) 	__u32 type_id;		/* struct/union type or array element type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4368) 	__u32 idx;		/* field index or array index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4369) 	const char *name;	/* field name or NULL for array accessor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4370) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4372) struct bpf_core_spec {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4373) 	const struct btf *btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4374) 	/* high-level spec: named fields and array indices only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4375) 	struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4376) 	/* original unresolved (no skip_mods_or_typedefs) root type ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4377) 	__u32 root_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4378) 	/* CO-RE relocation kind */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4379) 	enum bpf_core_relo_kind relo_kind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4380) 	/* high-level spec length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4381) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4382) 	/* raw, low-level spec: 1-to-1 with accessor spec string */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4383) 	int raw_spec[BPF_CORE_SPEC_MAX_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4384) 	/* raw spec length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4385) 	int raw_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4386) 	/* field bit offset represented by spec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4387) 	__u32 bit_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4388) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4390) static bool str_is_empty(const char *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4392) 	return !s || !s[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4395) static bool is_flex_arr(const struct btf *btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4396) 			const struct bpf_core_accessor *acc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4397) 			const struct btf_array *arr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4399) 	const struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4401) 	/* not a flexible array if it is not a named struct member or has non-zero size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4402) 	if (!acc->name || arr->nelems > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4403) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4405) 	/* has to be the last member of enclosing struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4406) 	t = btf__type_by_id(btf, acc->type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4407) 	return acc->idx == btf_vlen(t) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4410) static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4412) 	switch (kind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4413) 	case BPF_FIELD_BYTE_OFFSET: return "byte_off";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4414) 	case BPF_FIELD_BYTE_SIZE: return "byte_sz";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4415) 	case BPF_FIELD_EXISTS: return "field_exists";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4416) 	case BPF_FIELD_SIGNED: return "signed";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4417) 	case BPF_FIELD_LSHIFT_U64: return "lshift_u64";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4418) 	case BPF_FIELD_RSHIFT_U64: return "rshift_u64";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4419) 	case BPF_TYPE_ID_LOCAL: return "local_type_id";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4420) 	case BPF_TYPE_ID_TARGET: return "target_type_id";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4421) 	case BPF_TYPE_EXISTS: return "type_exists";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4422) 	case BPF_TYPE_SIZE: return "type_size";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4423) 	case BPF_ENUMVAL_EXISTS: return "enumval_exists";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4424) 	case BPF_ENUMVAL_VALUE: return "enumval_value";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4425) 	default: return "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4426) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4429) static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4431) 	switch (kind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4432) 	case BPF_FIELD_BYTE_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4433) 	case BPF_FIELD_BYTE_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4434) 	case BPF_FIELD_EXISTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4435) 	case BPF_FIELD_SIGNED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4436) 	case BPF_FIELD_LSHIFT_U64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4437) 	case BPF_FIELD_RSHIFT_U64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4438) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4439) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4440) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4441) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4444) static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4446) 	switch (kind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4447) 	case BPF_TYPE_ID_LOCAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4448) 	case BPF_TYPE_ID_TARGET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4449) 	case BPF_TYPE_EXISTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4450) 	case BPF_TYPE_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4451) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4452) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4453) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4454) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4457) static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4459) 	switch (kind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4460) 	case BPF_ENUMVAL_EXISTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4461) 	case BPF_ENUMVAL_VALUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4462) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4463) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4464) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4465) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4468) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4469)  * Turn bpf_core_relo into a low- and high-level spec representation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4470)  * validating correctness along the way, as well as calculating resulting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4471)  * field bit offset, specified by accessor string. Low-level spec captures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4472)  * every single level of nestedness, including traversing anonymous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4473)  * struct/union members. High-level one only captures semantically meaningful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4474)  * "turning points": named fields and array indices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4475)  * E.g., for this case:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4476)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4477)  *   struct sample {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4478)  *       int __unimportant;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4479)  *       struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4480)  *           int __1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4481)  *           int __2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4482)  *           int a[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4483)  *       };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4484)  *   };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4485)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4486)  *   struct sample *s = ...;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4487)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4488)  *   int *x = &s->a[3]; // access string = '0:1:2:3'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4489)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4490)  * Low-level spec has 1:1 mapping with each element of access string (it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4491)  * just a parsed access string representation): [0, 1, 2, 3].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4492)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4493)  * High-level spec will capture only 3 points:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4494)  *   - initial zero-index access by pointer (&s->... is the same as &s[0]...);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4495)  *   - field 'a' access (corresponds to '2' in low-level spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4496)  *   - array element #3 access (corresponds to '3' in low-level spec).
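 *
 * For this example (assuming 4-byte int and no padding), the resulting
 * spec->bit_offset is offsetof(struct sample, a) * 8 + 3 * 32 =
 * 96 + 96 = 192 bits.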
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4497)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4498)  * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4499)  * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4500)  * spec and raw_spec are kept empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4501)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4502)  * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use the access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4503)  * string to specify the index of the enumerator value to be relocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4504)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4505) static int bpf_core_parse_spec(const struct btf *btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4506) 			       __u32 type_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4507) 			       const char *spec_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4508) 			       enum bpf_core_relo_kind relo_kind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4509) 			       struct bpf_core_spec *spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4511) 	int access_idx, parsed_len, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4512) 	struct bpf_core_accessor *acc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4513) 	const struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4514) 	const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4515) 	__u32 id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4516) 	__s64 sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4518) 	if (str_is_empty(spec_str) || *spec_str == ':')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4519) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4521) 	memset(spec, 0, sizeof(*spec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4522) 	spec->btf = btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4523) 	spec->root_type_id = type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4524) 	spec->relo_kind = relo_kind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4526) 	/* type-based relocations don't have a field access string */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4527) 	if (core_relo_is_type_based(relo_kind)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4528) 		if (strcmp(spec_str, "0"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4529) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4530) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4531) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4533) 	/* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4534) 	while (*spec_str) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4535) 		if (*spec_str == ':')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4536) 			++spec_str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4537) 		if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4538) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4539) 		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4540) 			return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4541) 		spec_str += parsed_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4542) 		spec->raw_spec[spec->raw_len++] = access_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4543) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4545) 	if (spec->raw_len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4546) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4548) 	t = skip_mods_and_typedefs(btf, type_id, &id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4549) 	if (!t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4550) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4552) 	access_idx = spec->raw_spec[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4553) 	acc = &spec->spec[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4554) 	acc->type_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4555) 	acc->idx = access_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4556) 	spec->len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4558) 	if (core_relo_is_enumval_based(relo_kind)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4559) 		if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4560) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4562) 		/* record enumerator name in the first accessor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4563) 		acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4564) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4565) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4567) 	if (!core_relo_is_field_based(relo_kind))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4568) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4570) 	sz = btf__resolve_size(btf, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4571) 	if (sz < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4572) 		return sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4573) 	spec->bit_offset = access_idx * sz * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4575) 	for (i = 1; i < spec->raw_len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4576) 		t = skip_mods_and_typedefs(btf, id, &id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4577) 		if (!t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4578) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4580) 		access_idx = spec->raw_spec[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4581) 		acc = &spec->spec[spec->len];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4583) 		if (btf_is_composite(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4584) 			const struct btf_member *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4585) 			__u32 bit_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4587) 			if (access_idx >= btf_vlen(t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4588) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4590) 			bit_offset = btf_member_bit_offset(t, access_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4591) 			spec->bit_offset += bit_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4593) 			m = btf_members(t) + access_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4594) 			if (m->name_off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4595) 				name = btf__name_by_offset(btf, m->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4596) 				if (str_is_empty(name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4597) 					return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4599) 				acc->type_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4600) 				acc->idx = access_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4601) 				acc->name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4602) 				spec->len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4603) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4605) 			id = m->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4606) 		} else if (btf_is_array(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4607) 			const struct btf_array *a = btf_array(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4608) 			bool flex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4610) 			t = skip_mods_and_typedefs(btf, a->type, &id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4611) 			if (!t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4612) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4614) 			flex = is_flex_arr(btf, acc - 1, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4615) 			if (!flex && access_idx >= a->nelems)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4616) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4618) 			spec->spec[spec->len].type_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4619) 			spec->spec[spec->len].idx = access_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4620) 			spec->len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4622) 			sz = btf__resolve_size(btf, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4623) 			if (sz < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4624) 				return sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4625) 			spec->bit_offset += access_idx * sz * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4626) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4627) 			pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4628) 				type_id, spec_str, i, id, btf_kind_str(t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4629) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4630) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4631) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4633) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4636) static bool bpf_core_is_flavor_sep(const char *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4638) 	/* check X___Y name pattern, where X and Y are not underscores */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4639) 	return s[0] != '_' &&				      /* X */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4640) 	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4641) 	       s[4] != '_';				      /* Y */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4644) /* Given 'some_struct_name___with_flavor', return the length of the name prefix
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4645)  * before the last triple underscore. The struct name part after the last triple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4646)  * underscore is ignored during BPF CO-RE relocation matching.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4647)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4648) static size_t bpf_core_essential_name_len(const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4650) 	size_t n = strlen(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4651) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4653) 	for (i = n - 5; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4654) 		if (bpf_core_is_flavor_sep(name + i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4655) 			return i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4656) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4657) 	return n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4658) }
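
/*
 * For example, given a flavored type name
 *
 *     struct task_struct___has_new_field { ... };
 *
 * bpf_core_essential_name_len("task_struct___has_new_field") returns 11,
 * i.e. the length of "task_struct", so only the part before the last
 * "___" separator participates in name matching.
 */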
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4660) /* dynamically sized list of type IDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4661) struct ids_vec {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4662) 	__u32 *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4663) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4664) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4666) static void bpf_core_free_cands(struct ids_vec *cand_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4668) 	free(cand_ids->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4669) 	free(cand_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4672) static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4673) 					   __u32 local_type_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4674) 					   const struct btf *targ_btf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4676) 	size_t local_essent_len, targ_essent_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4677) 	const char *local_name, *targ_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4678) 	const struct btf_type *t, *local_t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4679) 	struct ids_vec *cand_ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4680) 	__u32 *new_ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4681) 	int i, err, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4683) 	local_t = btf__type_by_id(local_btf, local_type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4684) 	if (!local_t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4685) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4687) 	local_name = btf__name_by_offset(local_btf, local_t->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4688) 	if (str_is_empty(local_name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4689) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4690) 	local_essent_len = bpf_core_essential_name_len(local_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4692) 	cand_ids = calloc(1, sizeof(*cand_ids));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4693) 	if (!cand_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4694) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4695) 
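	/*
	 * Scan every type in target BTF and collect those whose BTF kind
	 * matches and whose essential (flavor-less) name equals the local
	 * type's essential name.
	 */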
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4696) 	n = btf__get_nr_types(targ_btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4697) 	for (i = 1; i <= n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4698) 		t = btf__type_by_id(targ_btf, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4699) 		if (btf_kind(t) != btf_kind(local_t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4700) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4702) 		targ_name = btf__name_by_offset(targ_btf, t->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4703) 		if (str_is_empty(targ_name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4704) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4706) 		targ_essent_len = bpf_core_essential_name_len(targ_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4707) 		if (targ_essent_len != local_essent_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4708) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4710) 		if (strncmp(local_name, targ_name, local_essent_len) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4711) 			pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4712) 				 local_type_id, btf_kind_str(local_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4713) 				 local_name, i, btf_kind_str(t), targ_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4714) 			new_ids = libbpf_reallocarray(cand_ids->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4715) 						      cand_ids->len + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4716) 						      sizeof(*cand_ids->data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4717) 			if (!new_ids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4718) 				err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4719) 				goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4720) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4721) 			cand_ids->data = new_ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4722) 			cand_ids->data[cand_ids->len++] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4723) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4724) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4725) 	return cand_ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4726) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4727) 	bpf_core_free_cands(cand_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4728) 	return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4731) /* Check two types for compatibility for the purpose of field access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4732)  * relocation. const/volatile/restrict and typedefs are skipped to ensure we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4733)  * are relocating semantically compatible entities:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4734)  *   - any two STRUCTs/UNIONs are compatible and can be mixed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4735)  *   - any two FWDs are compatible, if their names match (modulo flavor suffix);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4736)  *   - any two PTRs are always compatible;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4737)  *   - for ENUMs, names should be the same (ignoring flavor suffix) or at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4738)  *     least one of the enums should be anonymous;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4740)  *   - for INT, size and signedness are ignored;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4741)  *   - for ARRAY, dimensionality is ignored, element types are checked for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4742)  *     compatibility recursively;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4743)  *   - everything else shouldn't ever be a target of relocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4744)  * These rules are not set in stone and probably will be adjusted as we get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4745)  * more experience with using BPF CO-RE relocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4746)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4747) static int bpf_core_fields_are_compat(const struct btf *local_btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4748) 				      __u32 local_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4749) 				      const struct btf *targ_btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4750) 				      __u32 targ_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4752) 	const struct btf_type *local_type, *targ_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4754) recur:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4755) 	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4756) 	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4757) 	if (!local_type || !targ_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4758) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4760) 	if (btf_is_composite(local_type) && btf_is_composite(targ_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4761) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4762) 	if (btf_kind(local_type) != btf_kind(targ_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4763) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4765) 	switch (btf_kind(local_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4766) 	case BTF_KIND_PTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4767) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4768) 	case BTF_KIND_FWD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4769) 	case BTF_KIND_ENUM: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4770) 		const char *local_name, *targ_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4771) 		size_t local_len, targ_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4773) 		local_name = btf__name_by_offset(local_btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4774) 						 local_type->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4775) 		targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4776) 		local_len = bpf_core_essential_name_len(local_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4777) 		targ_len = bpf_core_essential_name_len(targ_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4778) 		/* one of them is anonymous or both w/ same flavor-less names */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4779) 		return local_len == 0 || targ_len == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4780) 		       (local_len == targ_len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4781) 			strncmp(local_name, targ_name, local_len) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4782) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4783) 	case BTF_KIND_INT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4784) 		/* just reject deprecated bitfield-like integers; all other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4785) 		 * integers are by default compatible with each other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4786) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4787) 		return btf_int_offset(local_type) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4788) 		       btf_int_offset(targ_type) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4789) 	case BTF_KIND_ARRAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4790) 		local_id = btf_array(local_type)->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4791) 		targ_id = btf_array(targ_type)->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4792) 		goto recur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4793) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4794) 		pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4795) 			btf_kind(local_type), local_id, targ_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4796) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4797) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4798) }
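
/*
 * For example, under these rules a local field "int a[4]" is compatible
 * with a target field "long a[8]": array dimensionality is ignored and any
 * two non-bitfield integers are compatible regardless of size or
 * signedness.
 */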
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4800) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4801)  * Given a single high-level named field accessor in the local type, find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4802)  * the corresponding high-level accessor for a target type. Along the way,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4803)  * maintain the low-level spec for the target. Also keep updating the target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4804)  * bit offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4805)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4806)  * Searching is performed through recursive exhaustive enumeration of all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4807)  * fields of a struct/union. If there are any anonymous (embedded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4808)  * structs/unions, they are recursively searched as well. If a field with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4809)  * the desired name is found, check compatibility between local and target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4810)  * types before returning the result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4811)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4812)  * 1 is returned if the field is found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4813)  * 0 is returned if no compatible field is found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4814)  * <0 is returned on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4815)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4816) static int bpf_core_match_member(const struct btf *local_btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4817) 				 const struct bpf_core_accessor *local_acc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4818) 				 const struct btf *targ_btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4819) 				 __u32 targ_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4820) 				 struct bpf_core_spec *spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4821) 				 __u32 *next_targ_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4823) 	const struct btf_type *local_type, *targ_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4824) 	const struct btf_member *local_member, *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4825) 	const char *local_name, *targ_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4826) 	__u32 local_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4827) 	int i, n, found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4829) 	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4830) 	if (!targ_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4831) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4832) 	if (!btf_is_composite(targ_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4833) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4835) 	local_id = local_acc->type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4836) 	local_type = btf__type_by_id(local_btf, local_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4837) 	local_member = btf_members(local_type) + local_acc->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4838) 	local_name = btf__name_by_offset(local_btf, local_member->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4840) 	n = btf_vlen(targ_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4841) 	m = btf_members(targ_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4842) 	for (i = 0; i < n; i++, m++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4843) 		__u32 bit_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4845) 		bit_offset = btf_member_bit_offset(targ_type, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4847) 		/* too deep struct/union/array nesting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4848) 		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4849) 			return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4851) 		/* speculate that this member is the one we're looking for */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4852) 		spec->bit_offset += bit_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4853) 		spec->raw_spec[spec->raw_len++] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4855) 		targ_name = btf__name_by_offset(targ_btf, m->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4856) 		if (str_is_empty(targ_name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4857) 			/* embedded struct/union, we need to go deeper */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4858) 			found = bpf_core_match_member(local_btf, local_acc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4859) 						      targ_btf, m->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4860) 						      spec, next_targ_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4861) 			if (found) /* either found or error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4862) 				return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4863) 		} else if (strcmp(local_name, targ_name) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4864) 			/* matching named field */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4865) 			struct bpf_core_accessor *targ_acc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4867) 			targ_acc = &spec->spec[spec->len++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4868) 			targ_acc->type_id = targ_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4869) 			targ_acc->idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4870) 			targ_acc->name = targ_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4872) 			*next_targ_id = m->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4873) 			found = bpf_core_fields_are_compat(local_btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4874) 							   local_member->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4875) 							   targ_btf, m->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4876) 			if (!found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4877) 				spec->len--; /* pop accessor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4878) 			return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4879) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4880) 		/* member turned out not to be what we were looking for */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4881) 		spec->bit_offset -= bit_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4882) 		spec->raw_len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4883) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4885) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4886) }
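
/*
 * Note that the search above descends into anonymous (unnamed) members, so
 * a named local field can still be matched if the target kernel moved it
 * into an embedded anonymous struct or union; every member visited along
 * the way is speculatively recorded in the raw spec and popped again when
 * the branch doesn't pan out.
 */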
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4888) /* Check local and target types for compatibility. This check is used for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4889)  * type-based CO-RE relocations and follows slightly different rules than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4890)  * field-based relocations. This function assumes that root types were already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4891)  * checked for name match. Beyond that initial root-level name check, names
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4892)  * are completely ignored. Compatibility rules are as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4893)  *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4894)  *     kind should match for local and target types (i.e., STRUCT is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4895)  *     compatible with UNION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4896)  *   - for ENUMs, the size is ignored;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4897)  *   - for INT, size and signedness are ignored;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4898)  *   - for ARRAY, dimensionality is ignored, element types are checked for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4899)  *     compatibility recursively;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4900)  *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4901)  *   - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4902)  *   - FUNC_PROTOs are compatible if they have compatible signatures: same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4903)  *     number of input args and compatible return and argument types.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4904)  * These rules are not set in stone and probably will be adjusted as we get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4905)  * more experience with using BPF CO-RE relocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4906)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4907) static int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4908) 				     const struct btf *targ_btf, __u32 targ_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4910) 	const struct btf_type *local_type, *targ_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4911) 	int depth = 32; /* max recursion depth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4913) 	/* caller made sure that names match (ignoring flavor suffix) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4914) 	local_type = btf__type_by_id(local_btf, local_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4915) 	targ_type = btf__type_by_id(targ_btf, targ_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4916) 	if (btf_kind(local_type) != btf_kind(targ_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4917) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4919) recur:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4920) 	depth--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4921) 	if (depth < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4922) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4924) 	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4925) 	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4926) 	if (!local_type || !targ_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4927) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4929) 	if (btf_kind(local_type) != btf_kind(targ_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4930) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4932) 	switch (btf_kind(local_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4933) 	case BTF_KIND_UNKN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4934) 	case BTF_KIND_STRUCT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4935) 	case BTF_KIND_UNION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4936) 	case BTF_KIND_ENUM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4937) 	case BTF_KIND_FWD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4938) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4939) 	case BTF_KIND_INT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4940) 		/* just reject deprecated bitfield-like integers; all other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4941) 		 * integers are by default compatible with each other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4942) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4943) 		return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4944) 	case BTF_KIND_PTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4945) 		local_id = local_type->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4946) 		targ_id = targ_type->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4947) 		goto recur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4948) 	case BTF_KIND_ARRAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4949) 		local_id = btf_array(local_type)->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4950) 		targ_id = btf_array(targ_type)->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4951) 		goto recur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4952) 	case BTF_KIND_FUNC_PROTO: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4953) 		struct btf_param *local_p = btf_params(local_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4954) 		struct btf_param *targ_p = btf_params(targ_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4955) 		__u16 local_vlen = btf_vlen(local_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4956) 		__u16 targ_vlen = btf_vlen(targ_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4957) 		int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4959) 		if (local_vlen != targ_vlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4960) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4962) 		for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4963) 			skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4964) 			skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4965) 			err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4966) 			if (err <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4967) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4968) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4970) 		/* tail recurse for return type check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4971) 		skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4972) 		skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4973) 		goto recur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4974) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4975) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4976) 		pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4977) 			btf_kind_str(local_type), local_id, targ_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4978) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4979) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4980) }
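
/*
 * For example, a local "int (*)(struct sk_buff *)" prototype is compatible
 * with a target "long (*)(struct sk_buff___v2 *)": the argument counts
 * match, pointers are followed, any two STRUCTs are compatible, and INT
 * size/signedness is ignored; names beyond the root type are not compared.
 */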
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4982) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4983)  * Try to match local spec to a target type and, if successful, produce full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4984)  * target spec (high-level, low-level + bit offset).
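 * Returns 1 on a successful match, 0 if the candidate doesn't match,
 * and <0 on error.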
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4985)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4986) static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4987) 			       const struct btf *targ_btf, __u32 targ_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4988) 			       struct bpf_core_spec *targ_spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4990) 	const struct btf_type *targ_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4991) 	const struct bpf_core_accessor *local_acc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4992) 	struct bpf_core_accessor *targ_acc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4993) 	int i, sz, matched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4995) 	memset(targ_spec, 0, sizeof(*targ_spec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4996) 	targ_spec->btf = targ_btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4997) 	targ_spec->root_type_id = targ_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4998) 	targ_spec->relo_kind = local_spec->relo_kind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5000) 	if (core_relo_is_type_based(local_spec->relo_kind)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5001) 		return bpf_core_types_are_compat(local_spec->btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5002) 						 local_spec->root_type_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5003) 						 targ_btf, targ_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5004) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5006) 	local_acc = &local_spec->spec[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5007) 	targ_acc = &targ_spec->spec[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5009) 	if (core_relo_is_enumval_based(local_spec->relo_kind)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5010) 		size_t local_essent_len, targ_essent_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5011) 		const struct btf_enum *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5012) 		const char *targ_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5014) 		/* has to resolve to an enum */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5015) 		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5016) 		if (!btf_is_enum(targ_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5017) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5019) 		local_essent_len = bpf_core_essential_name_len(local_acc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5021) 		for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5022) 			targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5023) 			targ_essent_len = bpf_core_essential_name_len(targ_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5024) 			if (targ_essent_len != local_essent_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5025) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5026) 			if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5027) 				targ_acc->type_id = targ_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5028) 				targ_acc->idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5029) 				targ_acc->name = targ_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5030) 				targ_spec->len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5031) 				targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5032) 				targ_spec->raw_len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5033) 				return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5034) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5035) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5036) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5039) 	if (!core_relo_is_field_based(local_spec->relo_kind))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5040) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5042) 	for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5043) 		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5044) 						   &targ_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5045) 		if (!targ_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5046) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5048) 		if (local_acc->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5049) 			matched = bpf_core_match_member(local_spec->btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5050) 							local_acc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5051) 							targ_btf, targ_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5052) 							targ_spec, &targ_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5053) 			if (matched <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5054) 				return matched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5055) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5056) 			/* for i=0, targ_id is already treated as array element
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5057) 			 * type (because it's the original struct), for others
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5058) 			 * we should find array element type first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5059) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5060) 			if (i > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5061) 				const struct btf_array *a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5062) 				bool flex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5064) 				if (!btf_is_array(targ_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5065) 					return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5067) 				a = btf_array(targ_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5068) 				flex = is_flex_arr(targ_btf, targ_acc - 1, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5069) 				if (!flex && local_acc->idx >= a->nelems)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5070) 					return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5071) 				if (!skip_mods_and_typedefs(targ_btf, a->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5072) 							    &targ_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5073) 					return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5074) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5076) 			/* too deep struct/union/array nesting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5077) 			if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5078) 				return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5080) 			targ_acc->type_id = targ_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5081) 			targ_acc->idx = local_acc->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5082) 			targ_acc->name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5083) 			targ_spec->len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5084) 			targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5085) 			targ_spec->raw_len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5087) 			sz = btf__resolve_size(targ_btf, targ_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5088) 			if (sz < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5089) 				return sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5090) 			targ_spec->bit_offset += local_acc->idx * sz * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5091) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5092) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5094) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5097) static int bpf_core_calc_field_relo(const struct bpf_program *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5098) 				    const struct bpf_core_relo *relo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5099) 				    const struct bpf_core_spec *spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5100) 				    __u32 *val, __u32 *field_sz, __u32 *type_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5101) 				    bool *validate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5103) 	const struct bpf_core_accessor *acc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5104) 	const struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5105) 	__u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5106) 	const struct btf_member *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5107) 	const struct btf_type *mt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5108) 	bool bitfield;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5109) 	__s64 sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5111) 	*field_sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5113) 	if (relo->kind == BPF_FIELD_EXISTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5114) 		*val = spec ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5115) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5116) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5118) 	if (!spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5119) 		return -EUCLEAN; /* request instruction poisoning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5121) 	acc = &spec->spec[spec->len - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5122) 	t = btf__type_by_id(spec->btf, acc->type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5124) 	/* a[n] accessor needs special handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5125) 	if (!acc->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5126) 		if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5127) 			*val = spec->bit_offset / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5128) 			/* remember field size for load/store mem size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5129) 			sz = btf__resolve_size(spec->btf, acc->type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5130) 			if (sz < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5131) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5132) 			*field_sz = sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5133) 			*type_id = acc->type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5134) 		} else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5135) 			sz = btf__resolve_size(spec->btf, acc->type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5136) 			if (sz < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5137) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5138) 			*val = sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5139) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5140) 			pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5141) 				prog->name, relo->kind, relo->insn_off / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5142) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5143) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5144) 		if (validate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5145) 			*validate = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5146) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5147) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5149) 	m = btf_members(t) + acc->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5150) 	mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5151) 	bit_off = spec->bit_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5152) 	bit_sz = btf_member_bitfield_size(t, acc->idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5154) 	bitfield = bit_sz > 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5155) 	if (bitfield) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5156) 		byte_sz = mt->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5157) 		byte_off = bit_off / 8 / byte_sz * byte_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5158) 		/* figure out smallest int size necessary for bitfield load */
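		/* For example, a 6-bit bitfield at bit offset 44 backed by a
		 * 1-byte int ends up as a 4-byte load at byte offset 4: the
		 * 1- and 2-byte windows (at byte offsets 5 and 4) can't cover
		 * bits [44, 50), but the 4-byte window at offset 4 can.
		 */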
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5159) 		while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5160) 			if (byte_sz >= 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5161) 				/* bitfield can't be read with 64-bit read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5162) 				pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5163) 					prog->name, relo->kind, relo->insn_off / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5164) 				return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5165) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5166) 			byte_sz *= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5167) 			byte_off = bit_off / 8 / byte_sz * byte_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5168) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5169) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5170) 		sz = btf__resolve_size(spec->btf, field_type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5171) 		if (sz < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5172) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5173) 		byte_sz = sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5174) 		byte_off = spec->bit_offset / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5175) 		bit_sz = byte_sz * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5176) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5178) 	/* for bitfields, all the relocatable aspects are ambiguous and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5179) 	 * might disagree with the compiler, so turn off validation of expected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5180) 	 * value, except for signedness
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5181) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5182) 	if (validate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5183) 		*validate = !bitfield;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5185) 	switch (relo->kind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5186) 	case BPF_FIELD_BYTE_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5187) 		*val = byte_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5188) 		if (!bitfield) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5189) 			*field_sz = byte_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5190) 			*type_id = field_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5191) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5192) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5193) 	case BPF_FIELD_BYTE_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5194) 		*val = byte_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5195) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5196) 	case BPF_FIELD_SIGNED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5197) 		/* enums will be assumed unsigned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5198) 		*val = btf_is_enum(mt) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5199) 		       (btf_int_encoding(mt) & BTF_INT_SIGNED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5200) 		if (validate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5201) 			*validate = true; /* signedness is never ambiguous */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5202) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5203) 	case BPF_FIELD_LSHIFT_U64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5204) #if __BYTE_ORDER == __LITTLE_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5205) 		*val = 64 - (bit_off + bit_sz - byte_off * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5206) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5207) 		*val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5208) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5209) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5210) 	case BPF_FIELD_RSHIFT_U64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5211) 		*val = 64 - bit_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5212) 		if (validate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5213) 			*validate = true; /* right shift is never ambiguous */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5214) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5215) 	case BPF_FIELD_EXISTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5216) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5217) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5218) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5220) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5223) static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5224) 				   const struct bpf_core_spec *spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5225) 				   __u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5227) 	__s64 sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5229) 	/* type-based relos return zero when target type is not found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5230) 	if (!spec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5231) 		*val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5232) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5233) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5235) 	switch (relo->kind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5236) 	case BPF_TYPE_ID_TARGET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5237) 		*val = spec->root_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5238) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5239) 	case BPF_TYPE_EXISTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5240) 		*val = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5241) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5242) 	case BPF_TYPE_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5243) 		sz = btf__resolve_size(spec->btf, spec->root_type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5244) 		if (sz < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5245) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5246) 		*val = sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5247) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5248) 	case BPF_TYPE_ID_LOCAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5249) 	/* BPF_TYPE_ID_LOCAL is handled specially and shouldn't get here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5250) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5251) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5252) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5254) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5257) static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5258) 				      const struct bpf_core_spec *spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5259) 				      __u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5261) 	const struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5262) 	const struct btf_enum *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5264) 	switch (relo->kind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5265) 	case BPF_ENUMVAL_EXISTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5266) 		*val = spec ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5267) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5268) 	case BPF_ENUMVAL_VALUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5269) 		if (!spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5270) 			return -EUCLEAN; /* request instruction poisoning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5271) 		t = btf__type_by_id(spec->btf, spec->spec[0].type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5272) 		e = btf_enum(t) + spec->spec[0].idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5273) 		*val = e->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5274) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5275) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5276) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5277) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5279) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5282) struct bpf_core_relo_res {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5284) 	/* expected value in the instruction, unless validate == false */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5285) 	__u32 orig_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5286) 	/* new value that needs to be patched up to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5287) 	__u32 new_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5288) 	/* relocation unsuccessful, poison instruction, but don't fail load */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5289) 	bool poison;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5290) 	/* some relocations can't be validated against orig_val */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5291) 	bool validate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5292) 	/* for field byte offset relocations of the forms:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5293) 	 *     *(T *)(rX + <off>) = rY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5294) 	 *     rX = *(T *)(rY + <off>),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5295) 	 * we remember original and resolved field size to adjust direct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5296) 	 * memory loads of pointers and integers; this is necessary for 32-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5297) 	 * host kernel architectures, but also allows automatically relocating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5298) 	 * fields that were resized from, e.g., u32 to u64, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5299) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5300) 	bool fail_memsz_adjust;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5301) 	__u32 orig_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5302) 	__u32 orig_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5303) 	__u32 new_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5304) 	__u32 new_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5305) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5307) /* Calculate original and target relocation values, given local and target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5308)  * specs and relocation kind. These values are calculated for each candidate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5309)  * If there are multiple candidates, resulting values should all be consistent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5310)  * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5311)  * If the instruction has to be poisoned, res->poison will be set to true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5312)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5313) static int bpf_core_calc_relo(const struct bpf_program *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5314) 			      const struct bpf_core_relo *relo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5315) 			      int relo_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5316) 			      const struct bpf_core_spec *local_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5317) 			      const struct bpf_core_spec *targ_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5318) 			      struct bpf_core_relo_res *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5320) 	int err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5322) 	res->orig_val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5323) 	res->new_val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5324) 	res->poison = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5325) 	res->validate = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5326) 	res->fail_memsz_adjust = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5327) 	res->orig_sz = res->new_sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5328) 	res->orig_type_id = res->new_type_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5330) 	if (core_relo_is_field_based(relo->kind)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5331) 		err = bpf_core_calc_field_relo(prog, relo, local_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5332) 					       &res->orig_val, &res->orig_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5333) 					       &res->orig_type_id, &res->validate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5334) 		err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5335) 						      &res->new_val, &res->new_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5336) 						      &res->new_type_id, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5337) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5338) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5339) 		/* Validate if it's safe to adjust load/store memory size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5340) 		 * Adjustments are performed only if original and new memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5341) 		 * sizes differ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5342) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5343) 		res->fail_memsz_adjust = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5344) 		if (res->orig_sz != res->new_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5345) 			const struct btf_type *orig_t, *new_t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5347) 			orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5348) 			new_t = btf__type_by_id(targ_spec->btf, res->new_type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5350) 			/* There are two use cases in which it's safe to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5351) 			 * adjust load/store's mem size:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5352) 			 *   - reading a 32-bit kernel pointer, while on BPF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5353) 			 *   side pointers are always 64-bit; in this case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5354) 			 *   it's safe to "downsize" instruction size due to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5355) 			 *   pointer being treated as unsigned integer with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5356) 			 *   zero-extended upper 32-bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5357) 			 *   - reading unsigned integers, again due to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5358) 			 *   zero-extension is preserving the value correctly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5359) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5360) 			 * In all other cases it's incorrect to attempt to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5361) 			 * load/store field because read value will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5362) 			 * incorrect, so we poison relocated instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5363) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5364) 			if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5365) 				goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5366) 			if (btf_is_int(orig_t) && btf_is_int(new_t) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5367) 			    btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5368) 			    btf_int_encoding(new_t) != BTF_INT_SIGNED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5369) 				goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5371) 			/* mark as invalid mem size adjustment, but this will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5372) 			 * only be checked for LDX/STX/ST insns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5373) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5374) 			res->fail_memsz_adjust = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5375) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5376) 	} else if (core_relo_is_type_based(relo->kind)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5377) 		err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5378) 		err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5379) 	} else if (core_relo_is_enumval_based(relo->kind)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5380) 		err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5381) 		err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5382) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5384) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5385) 	if (err == -EUCLEAN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5386) 		/* EUCLEAN is used to signal instruction poisoning request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5387) 		res->poison = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5388) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5389) 	} else if (err == -EOPNOTSUPP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5390) 		/* EOPNOTSUPP means unknown/unsupported relocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5391) 		pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5392) 			prog->name, relo_idx, core_relo_kind_str(relo->kind),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5393) 			relo->kind, relo->insn_off / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5394) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5396) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5399) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5400)  * Turn an instruction for which CO-RE relocation failed into an invalid one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5401)  * with a distinct signature.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5402)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5403) static void bpf_core_poison_insn(struct bpf_program *prog, int relo_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5404) 				 int insn_idx, struct bpf_insn *insn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5406) 	pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5407) 		 prog->name, relo_idx, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5408) 	insn->code = BPF_JMP | BPF_CALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5409) 	insn->dst_reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5410) 	insn->src_reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5411) 	insn->off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5412) 	/* if this instruction is reachable (not dead code), the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5413) 	 * verifier will complain with the following message:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5414) 	 * invalid func unknown#195896080
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5415) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5416) 	insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5419) static bool is_ldimm64(struct bpf_insn *insn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5421) 	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5424) static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5426) 	switch (BPF_SIZE(insn->code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5427) 	case BPF_DW: return 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5428) 	case BPF_W: return 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5429) 	case BPF_H: return 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5430) 	case BPF_B: return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5431) 	default: return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5432) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5435) static int insn_bytes_to_bpf_size(__u32 sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5437) 	switch (sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5438) 	case 8: return BPF_DW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5439) 	case 4: return BPF_W;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5440) 	case 2: return BPF_H;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5441) 	case 1: return BPF_B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5442) 	default: return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5443) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5444) }
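/*
 * Illustration (hedged, offsets made up): these two size helpers let the
 * LDX/ST/STX case of bpf_core_patch_insn() below rewrite the access width,
 * e.g. turning
 *
 *	r1 = *(u64 *)(r2 + 0);
 *
 * into
 *
 *	r1 = *(u32 *)(r2 + 0);
 *
 * when the matched target field (e.g. a pointer in a 32-bit kernel's BTF) is
 * only 4 bytes wide: the BPF_SIZE() bits of insn->code are swapped via
 * insn_bytes_to_bpf_size(), while the mode and class bits are preserved.
 */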
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5446) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5447)  * Patch relocatable BPF instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5448)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5449)  * Patched value is determined by relocation kind and target specification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5450)  * For existence relocations target spec will be NULL if field/type is not found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5451)  * Expected insn->imm value is determined using relocation kind and local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5452)  * spec, and is checked before patching instruction. If actual insn->imm value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5453)  * is wrong, bail out with error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5454)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5455)  * Currently supported classes of BPF instruction are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5456)  * 1. rX = <imm> (assignment with immediate operand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5457)  * 2. rX += <imm> (arithmetic operations with immediate operand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5458)  * 3. rX = <imm64> (load with 64-bit immediate value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5459)  * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5460)  * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5461)  * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5462)  */
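/*
 * Illustration (hedged; struct, field and offsets are hypothetical): a BPF-C
 * access such as
 *
 *	val = s->b;		// s points to a CO-RE-relocatable struct
 *
 * is compiled into something like
 *
 *	r1 = *(u32 *)(r2 + 8);	// 8 = field offset in the *local* BTF
 *
 * and the LDX/ST/STX case below rewrites insn->off from the local offset to
 * whatever byte offset the matched field has in the target (kernel) BTF.
 */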
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5463) static int bpf_core_patch_insn(struct bpf_program *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5464) 			       const struct bpf_core_relo *relo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5465) 			       int relo_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5466) 			       const struct bpf_core_relo_res *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5468) 	__u32 orig_val, new_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5469) 	struct bpf_insn *insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5470) 	int insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5471) 	__u8 class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5473) 	if (relo->insn_off % BPF_INSN_SZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5474) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5475) 	insn_idx = relo->insn_off / BPF_INSN_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5476) 	/* adjust insn_idx from section frame of reference to the local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5477) 	 * program's frame of reference; (sub-)program code is not yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5478) 	 * relocated, so it's enough to just subtract in-section offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5479) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5480) 	insn_idx = insn_idx - prog->sec_insn_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5481) 	insn = &prog->insns[insn_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5482) 	class = BPF_CLASS(insn->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5484) 	if (res->poison) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5485) poison:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5486) 		/* poison second part of ldimm64 to avoid confusing error from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5487) 		 * verifier about "unknown opcode 00"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5488) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5489) 		if (is_ldimm64(insn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5490) 			bpf_core_poison_insn(prog, relo_idx, insn_idx + 1, insn + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5491) 		bpf_core_poison_insn(prog, relo_idx, insn_idx, insn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5492) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5493) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5495) 	orig_val = res->orig_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5496) 	new_val = res->new_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5498) 	switch (class) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5499) 	case BPF_ALU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5500) 	case BPF_ALU64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5501) 		if (BPF_SRC(insn->code) != BPF_K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5502) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5503) 		if (res->validate && insn->imm != orig_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5504) 			pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5505) 				prog->name, relo_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5506) 				insn_idx, insn->imm, orig_val, new_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5507) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5508) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5509) 		orig_val = insn->imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5510) 		insn->imm = new_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5511) 		pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5512) 			 prog->name, relo_idx, insn_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5513) 			 orig_val, new_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5514) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5515) 	case BPF_LDX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5516) 	case BPF_ST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5517) 	case BPF_STX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5518) 		if (res->validate && insn->off != orig_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5519) 			pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5520) 				prog->name, relo_idx, insn_idx, insn->off, orig_val, new_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5521) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5522) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5523) 		if (new_val > SHRT_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5524) 			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5525) 				prog->name, relo_idx, insn_idx, new_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5526) 			return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5527) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5528) 		if (res->fail_memsz_adjust) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5529) 			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5530) 				"Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5531) 				prog->name, relo_idx, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5532) 			goto poison;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5533) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5535) 		orig_val = insn->off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5536) 		insn->off = new_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5537) 		pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5538) 			 prog->name, relo_idx, insn_idx, orig_val, new_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5540) 		if (res->new_sz != res->orig_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5541) 			int insn_bytes_sz, insn_bpf_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5543) 			insn_bytes_sz = insn_bpf_size_to_bytes(insn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5544) 			if (insn_bytes_sz != res->orig_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5545) 				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5546) 					prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5547) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5548) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5550) 			insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5551) 			if (insn_bpf_sz < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5552) 				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5553) 					prog->name, relo_idx, insn_idx, res->new_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5554) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5555) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5557) 			insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5558) 			pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5559) 				 prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5560) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5561) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5562) 	case BPF_LD: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5563) 		__u64 imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5565) 		if (!is_ldimm64(insn) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5566) 		    insn[0].src_reg != 0 || insn[0].off != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5567) 		    insn_idx + 1 >= prog->insns_cnt ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5568) 		    insn[1].code != 0 || insn[1].dst_reg != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5569) 		    insn[1].src_reg != 0 || insn[1].off != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5570) 			pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5571) 				prog->name, relo_idx, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5572) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5573) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5575) 		imm = insn[0].imm + ((__u64)insn[1].imm << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5576) 		if (res->validate && imm != orig_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5577) 			pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5578) 				prog->name, relo_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5579) 				insn_idx, (unsigned long long)imm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5580) 				orig_val, new_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5581) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5582) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5584) 		insn[0].imm = new_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5585) 		insn[1].imm = 0; /* currently only 32-bit values are supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5586) 		pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5587) 			 prog->name, relo_idx, insn_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5588) 			 (unsigned long long)imm, new_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5589) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5590) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5591) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5592) 		pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5593) 			prog->name, relo_idx, insn_idx, insn->code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5594) 			insn->src_reg, insn->dst_reg, insn->off, insn->imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5595) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5596) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5598) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5599) }
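/*
 * Hedged note: the BPF_LD case above handles relocations that land on the
 * two-instruction ldimm64 form, e.g. type-ID or enum-value relocations
 * produced by helpers such as bpf_core_type_id_kernel() or
 * bpf_core_enum_value(); only the low 32 bits are patched (insn[1].imm is
 * zeroed), matching the "only 32-bit values are supported" comment.
 */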
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5601) /* Output spec definition in the format:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5602)  * [<type-id>] <kind> <type-name><spec> (<raw-spec> @ offset <byte-offset>),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5603)  * where <spec> is a C-syntax view of the recorded field access, e.g.: x.a[3].b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5604)  */
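/*
 * Hypothetical example of the printed spec for a field-based relocation
 * (type ID, names, indices and offsets are made up for illustration):
 *
 *	[1024] struct sample.a[3].b (0:1:3:2 @ offset 16)
 */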
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5605) static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5607) 	const struct btf_type *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5608) 	const struct btf_enum *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5609) 	const char *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5610) 	__u32 type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5611) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5613) 	type_id = spec->root_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5614) 	t = btf__type_by_id(spec->btf, type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5615) 	s = btf__name_by_offset(spec->btf, t->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5617) 	libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5619) 	if (core_relo_is_type_based(spec->relo_kind))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5620) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5622) 	if (core_relo_is_enumval_based(spec->relo_kind)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5623) 		t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5624) 		e = btf_enum(t) + spec->raw_spec[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5625) 		s = btf__name_by_offset(spec->btf, e->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5627) 		libbpf_print(level, "::%s = %u", s, e->val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5628) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5629) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5631) 	if (core_relo_is_field_based(spec->relo_kind)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5632) 		for (i = 0; i < spec->len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5633) 			if (spec->spec[i].name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5634) 				libbpf_print(level, ".%s", spec->spec[i].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5635) 			else if (i > 0 || spec->spec[i].idx > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5636) 				libbpf_print(level, "[%u]", spec->spec[i].idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5637) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5639) 		libbpf_print(level, " (");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5640) 		for (i = 0; i < spec->raw_len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5641) 			libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5643) 		if (spec->bit_offset % 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5644) 			libbpf_print(level, " @ offset %u.%u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5645) 				     spec->bit_offset / 8, spec->bit_offset % 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5646) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5647) 			libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5648) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5649) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5652) static size_t bpf_core_hash_fn(const void *key, void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5654) 	return (size_t)key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5657) static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5659) 	return k1 == k2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5662) static void *u32_as_hash_key(__u32 x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5664) 	return (void *)(uintptr_t)x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5665) }
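/*
 * These helpers back the candidate cache (cand_cache below): the key is a
 * local root type ID stored directly in the pointer via u32_as_hash_key(),
 * so an identity hash and pointer-equality compare suffice; the value is the
 * cached ids_vec of candidate target type IDs.
 */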
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5667) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5668)  * CO-RE relocate single instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5669)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5670)  * The outline and important points of the algorithm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5671)  * 1. For a given local type, find corresponding candidate target types.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5672)  *    A candidate type is a type with the same "essential" name, ignoring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5673)  *    everything after the last triple underscore (___). E.g., `sample`,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5674)  *    `sample___flavor_one`, and `sample___flavor_another_one` are all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5675)  *    candidates for each other. Names with a triple underscore are referred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5676)  *    to as "flavors" and are useful, among other things, for specifying and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5677)  *    supporting incompatible variations of the same kernel struct, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5678)  *    might differ between kernel versions and/or build configurations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5679)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5680)  *    N.B. Struct "flavors" can be generated by bpftool's BTF-to-C converter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5681)  *    when the deduplicated BTF of a kernel still contains more than one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5682)  *    distinct type with the same name. In that case, ___2, ___3, etc. are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5683)  *    appended starting from the second name conflict. But struct flavors are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5684)  *    also useful when defined "locally", in the BPF program, to extract the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5685)  *    same data despite incompatible changes between kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5686)  *    versions/configurations. For instance, to handle a field rename between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5687)  *    kernel versions, one can use two flavors of the struct with the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5688)  *    common name and use conditional relocations to extract that field,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5689)  *    depending on the target kernel version (see the illustrative sketch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5690)  *    after this comment).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5691)  * 2. For each candidate type, try to match local specification to this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5692)  *    candidate target type. Matching involves finding corresponding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5693)  *    high-level spec accessors, meaning that all named fields should match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5694)  *    and all array accesses should be within the actual bounds. Also,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5695)  *    types should be compatible (see bpf_core_fields_are_compat for details).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5696)  * 3. It is supported and expected that there might be multiple flavors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5697)  *    matching the spec. As long as all the specs resolve to the same set of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5698)  *    offsets across all candidates, there is no error. If there is any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5699)  *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5700)  *    imperfections of BTF deduplication, which can cause slight duplication of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5701)  *    the same BTF type if some directly or indirectly referenced (by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5702)  *    pointer) type gets resolved to different actual types in different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5703)  *    object files. If such a situation occurs, deduplicated BTF will end up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5704)  *    with two (or more) structurally identical types, which differ only in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5705)  *    types they refer to through pointer. This should be OK in most cases and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5706)  *    is not an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5707)  * 4. The candidate type search is performed by linearly scanning through all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5708)  *    types in the target BTF. It is anticipated that this is overall more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5709)  *    efficient memory-wise and not significantly worse (if not better)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5710)  *    CPU-wise than prebuilding a map from all local type names to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5711)  *    a list of candidate type names. It's also sped up by caching the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5712)  *    resolved list of matching candidates for each local "root" type ID that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5713)  *    has at least one bpf_core_relo associated with it. This list is shared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5714)  *    between multiple relocations for the same type ID and is updated as some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5715)  *    of the candidates are pruned due to structural incompatibility.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5716)  */
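/*
 * Illustrative sketch of "local" struct flavors (type and field names are
 * hypothetical, and <bpf/bpf_core_read.h> is assumed): a BPF program that
 * must handle a field rename across kernel versions can define both flavors
 * locally and pick one at runtime via a field-existence relocation:
 *
 *	struct sample___old { int old_name; } __attribute__((preserve_access_index));
 *	struct sample___new { int new_name; } __attribute__((preserve_access_index));
 *
 *	static int read_field(void *p)
 *	{
 *		struct sample___old *o = p;
 *		struct sample___new *n = p;
 *
 *		if (bpf_core_field_exists(o->old_name))
 *			return BPF_CORE_READ(o, old_name);
 *		return BPF_CORE_READ(n, new_name);
 *	}
 */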
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5717) static int bpf_core_apply_relo(struct bpf_program *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5718) 			       const struct bpf_core_relo *relo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5719) 			       int relo_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5720) 			       const struct btf *local_btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5721) 			       const struct btf *targ_btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5722) 			       struct hashmap *cand_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5724) 	struct bpf_core_spec local_spec, cand_spec, targ_spec = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5725) 	const void *type_key = u32_as_hash_key(relo->type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5726) 	struct bpf_core_relo_res cand_res, targ_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5727) 	const struct btf_type *local_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5728) 	const char *local_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5729) 	struct ids_vec *cand_ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5730) 	__u32 local_id, cand_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5731) 	const char *spec_str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5732) 	int i, j, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5734) 	local_id = relo->type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5735) 	local_type = btf__type_by_id(local_btf, local_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5736) 	if (!local_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5737) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5739) 	local_name = btf__name_by_offset(local_btf, local_type->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5740) 	if (!local_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5741) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5743) 	spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5744) 	if (str_is_empty(spec_str))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5745) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5747) 	err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5748) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5749) 		pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5750) 			prog->name, relo_idx, local_id, btf_kind_str(local_type),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5751) 			str_is_empty(local_name) ? "<anon>" : local_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5752) 			spec_str, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5753) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5754) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5756) 	pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5757) 		 relo_idx, core_relo_kind_str(relo->kind), relo->kind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5758) 	bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5759) 	libbpf_print(LIBBPF_DEBUG, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5761) 	/* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5762) 	if (relo->kind == BPF_TYPE_ID_LOCAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5763) 		targ_res.validate = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5764) 		targ_res.poison = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5765) 		targ_res.orig_val = local_spec.root_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5766) 		targ_res.new_val = local_spec.root_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5767) 		goto patch_insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5768) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5770) 	/* libbpf doesn't support candidate search for anonymous types */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5771) 	if (str_is_empty(spec_str)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5772) 		pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5773) 			prog->name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5774) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5775) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5777) 	if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5778) 		cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5779) 		if (IS_ERR(cand_ids)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5780) 			pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5781) 				prog->name, relo_idx, local_id, btf_kind_str(local_type),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5782) 				local_name, PTR_ERR(cand_ids));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5783) 			return PTR_ERR(cand_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5784) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5785) 		err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5786) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5787) 			bpf_core_free_cands(cand_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5788) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5789) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5790) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5792) 	for (i = 0, j = 0; i < cand_ids->len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5793) 		cand_id = cand_ids->data[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5794) 		err = bpf_core_spec_match(&local_spec, targ_btf, cand_id, &cand_spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5795) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5796) 			pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5797) 				prog->name, relo_idx, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5798) 			bpf_core_dump_spec(LIBBPF_WARN, &cand_spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5799) 			libbpf_print(LIBBPF_WARN, ": %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5800) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5801) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5803) 		pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5804) 			 relo_idx, err == 0 ? "non-matching" : "matching", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5805) 		bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5806) 		libbpf_print(LIBBPF_DEBUG, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5808) 		if (err == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5809) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5811) 		err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, &cand_spec, &cand_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5812) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5813) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5815) 		if (j == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5816) 			targ_res = cand_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5817) 			targ_spec = cand_spec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5818) 		} else if (cand_spec.bit_offset != targ_spec.bit_offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5819) 			/* if there are many field relo candidates, they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5820) 			 * should all resolve to the same bit offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5821) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5822) 			pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5823) 				prog->name, relo_idx, cand_spec.bit_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5824) 				targ_spec.bit_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5825) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5826) 		} else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5827) 			/* all candidates should result in the same relocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5828) 			 * decision and value, otherwise it's dangerous to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5829) 			 * proceed due to ambiguity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5830) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5831) 			pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5832) 				prog->name, relo_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5833) 				cand_res.poison ? "failure" : "success", cand_res.new_val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5834) 				targ_res.poison ? "failure" : "success", targ_res.new_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5835) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5836) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5838) 		cand_ids->data[j++] = cand_spec.root_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5839) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5841) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5842) 	 * For a BPF_FIELD_EXISTS relo, or when the BPF program has field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5843) 	 * existence checks or kernel version/config checks, it's expected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5844) 	 * that we might not find any candidates. In this case, if the field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5845) 	 * wasn't found in any candidate, the list of candidates shouldn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5846) 	 * change at all; we'll just handle the relocation appropriately,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5847) 	 * depending on the relo's kind.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5848) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5849) 	if (j > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5850) 		cand_ids->len = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5852) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5853) 	 * If no candidates were found, it might be either a programmer error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5854) 	 * or an expected case, depending on whether the instruction with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5855) 	 * relocation is guarded in some way that makes it unreachable (dead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5856) 	 * code) if the relocation can't be resolved. This is handled in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5857) 	 * bpf_core_patch_insn() uniformly by replacing that instruction with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5858) 	 * a BPF helper call insn (using an invalid helper ID). If that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5859) 	 * instruction is indeed unreachable, it will be ignored and eliminated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5860) 	 * by the verifier. If it was an error, the verifier will complain and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5861) 	 * point to a specific instruction number in its log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5862) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5863) 	if (j == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5864) 		pr_debug("prog '%s': relo #%d: no matching targets found\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5865) 			 prog->name, relo_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5867) 		/* calculate single target relo result explicitly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5868) 		err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, NULL, &targ_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5869) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5870) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5871) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5873) patch_insn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5874) 	/* bpf_core_patch_insn() should know how to handle missing targ_spec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5875) 	err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5876) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5877) 		pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5878) 			prog->name, relo_idx, relo->insn_off, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5879) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5880) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5882) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5885) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5886) bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5888) 	const struct btf_ext_info_sec *sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5889) 	const struct bpf_core_relo *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5890) 	const struct btf_ext_info *seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5891) 	struct hashmap_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5892) 	struct hashmap *cand_cache = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5893) 	struct bpf_program *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5894) 	struct btf *targ_btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5895) 	const char *sec_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5896) 	int i, err = 0, insn_idx, sec_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5898) 	if (obj->btf_ext->core_relo_info.len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5899) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5901) 	if (targ_btf_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5902) 		targ_btf = btf__parse(targ_btf_path, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5903) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5904) 		targ_btf = obj->btf_vmlinux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5905) 	if (IS_ERR_OR_NULL(targ_btf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5906) 		pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5907) 		return PTR_ERR(targ_btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5908) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5910) 	cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5911) 	if (IS_ERR(cand_cache)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5912) 		err = PTR_ERR(cand_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5913) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5914) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5916) 	seg = &obj->btf_ext->core_relo_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5917) 	for_each_btf_ext_sec(seg, sec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5918) 		sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5919) 		if (str_is_empty(sec_name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5920) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5921) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5922) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5923) 		/* bpf_object's ELF is gone by now, so it's not easy to find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5924) 		 * a section index by section name, but we can find *any*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5925) 		 * bpf_program within the desired section and use its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5926) 		 * prog->sec_idx to do a proper search by section index and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5927) 		 * instruction offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5928) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5929) 		prog = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5930) 		for (i = 0; i < obj->nr_programs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5931) 			prog = &obj->programs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5932) 			if (strcmp(prog->sec_name, sec_name) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5933) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5934) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5935) 		if (!prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5936) 			pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5937) 			return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5938) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5939) 		sec_idx = prog->sec_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5941) 		pr_debug("sec '%s': found %d CO-RE relocations\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5942) 			 sec_name, sec->num_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5944) 		for_each_btf_ext_rec(seg, sec, i, rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5945) 			insn_idx = rec->insn_off / BPF_INSN_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5946) 			prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5947) 			if (!prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5948) 				pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5949) 					sec_name, insn_idx, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5950) 				err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5951) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5952) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5953) 			/* no need to apply CO-RE relocation if the program is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5954) 			 * not going to be loaded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5955) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5956) 			if (!prog->load)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5957) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5959) 			err = bpf_core_apply_relo(prog, rec, i, obj->btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5960) 						  targ_btf, cand_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5961) 			if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5962) 				pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5963) 					prog->name, i, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5964) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5965) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5966) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5967) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5969) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5970) 	/* obj->btf_vmlinux is freed at the end of object load phase */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5971) 	if (targ_btf != obj->btf_vmlinux)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5972) 		btf__free(targ_btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5973) 	if (!IS_ERR_OR_NULL(cand_cache)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5974) 		hashmap__for_each_entry(cand_cache, entry, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5975) 			bpf_core_free_cands(entry->value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5976) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5977) 		hashmap__free(cand_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5978) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5979) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5980) }
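/*
 * Hedged usage note (caller side, not part of this function): targ_btf_path
 * above normally originates from bpf_object_load_attr.target_btf_path, which
 * lets a caller relocate against a custom BTF file instead of the
 * auto-loaded vmlinux BTF, e.g.:
 *
 *	struct bpf_object_load_attr load_attr = {
 *		.obj = obj,
 *		.target_btf_path = "/path/to/custom-vmlinux.btf",
 *	};
 *	err = bpf_object__load_xattr(&load_attr);
 */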
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5982) /* Relocate data references within program code:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5983)  *  - map references;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5984)  *  - global variable references;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5985)  *  - extern references.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5986)  */
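/*
 * Illustration (hedged, map name is hypothetical): a BPF-C map access like
 *
 *	long *val = bpf_map_lookup_elem(&counts, &key);
 *
 * compiles into a two-insn ldimm64 ("r1 = <map>") that libbpf records as a
 * RELO_LD64 relocation; the switch below resolves it by setting src_reg to
 * BPF_PSEUDO_MAP_FD and imm to the map's fd, so the kernel can find the map.
 */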
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5987) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5988) bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5990) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5992) 	for (i = 0; i < prog->nr_reloc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5993) 		struct reloc_desc *relo = &prog->reloc_desc[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5994) 		struct bpf_insn *insn = &prog->insns[relo->insn_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5995) 		struct extern_desc *ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5997) 		switch (relo->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5998) 		case RELO_LD64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5999) 			insn[0].src_reg = BPF_PSEUDO_MAP_FD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6000) 			insn[0].imm = obj->maps[relo->map_idx].fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6001) 			relo->processed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6002) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6003) 		case RELO_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6004) 			insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6005) 			insn[1].imm = insn[0].imm + relo->sym_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6006) 			insn[0].imm = obj->maps[relo->map_idx].fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6007) 			relo->processed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6008) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6009) 		case RELO_EXTERN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6010) 			ext = &obj->externs[relo->sym_off];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6011) 			if (ext->type == EXT_KCFG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6012) 				insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6013) 				insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6014) 				insn[1].imm = ext->kcfg.data_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6015) 			} else /* EXT_KSYM */ {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6016) 				if (ext->ksym.type_id) { /* typed ksyms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6017) 					insn[0].src_reg = BPF_PSEUDO_BTF_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6018) 					insn[0].imm = ext->ksym.vmlinux_btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6019) 				} else { /* typeless ksyms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6020) 					insn[0].imm = (__u32)ext->ksym.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6021) 					insn[1].imm = ext->ksym.addr >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6022) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6023) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6024) 			relo->processed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6025) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6026) 		case RELO_CALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6027) 			/* will be handled as a follow up pass */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6028) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6029) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6030) 			pr_warn("prog '%s': relo #%d: bad relo type %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6031) 				prog->name, i, relo->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6032) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6033) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6034) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6036) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6039) static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6040) 				    const struct bpf_program *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6041) 				    const struct btf_ext_info *ext_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6042) 				    void **prog_info, __u32 *prog_rec_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6043) 				    __u32 *prog_rec_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6045) 	void *copy_start = NULL, *copy_end = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6046) 	void *rec, *rec_end, *new_prog_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6047) 	const struct btf_ext_info_sec *sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6048) 	size_t old_sz, new_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6049) 	const char *sec_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6050) 	int i, off_adj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6052) 	for_each_btf_ext_sec(ext_info, sec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6053) 		sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6054) 		if (!sec_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6055) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6056) 		if (strcmp(sec_name, prog->sec_name) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6057) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6059) 		for_each_btf_ext_rec(ext_info, sec, i, rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6060) 			__u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6062) 			if (insn_off < prog->sec_insn_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6063) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6064) 			if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6065) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6067) 			if (!copy_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6068) 				copy_start = rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6069) 			copy_end = rec + ext_info->rec_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6070) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6072) 		if (!copy_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6073) 			return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6075) 		/* append func/line info of a given (sub-)program to the main
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6076) 		 * program func/line info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6077) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6078) 		old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6079) 		new_sz = old_sz + (copy_end - copy_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6080) 		new_prog_info = realloc(*prog_info, new_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6081) 		if (!new_prog_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6082) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6083) 		*prog_info = new_prog_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6084) 		*prog_rec_cnt = new_sz / ext_info->rec_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6085) 		memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6087) 		/* Kernel instruction offsets are in units of 8-byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6088) 		 * instructions, while .BTF.ext instruction offsets generated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6089) 		 * by Clang are in units of bytes. So convert Clang offsets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6090) 		 * into kernel offsets and adjust offset according to program
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6091) 		 * relocated position.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6092) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6093) 		off_adj = prog->sub_insn_off - prog->sec_insn_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6094) 		rec = new_prog_info + old_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6095) 		rec_end = new_prog_info + new_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6096) 		for (; rec < rec_end; rec += ext_info->rec_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6097) 			__u32 *insn_off = rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6099) 			*insn_off = *insn_off / BPF_INSN_SZ + off_adj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6100) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6101) 		*prog_rec_sz = ext_info->rec_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6102) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6103) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6105) 	return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6106) }
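
/*
 * The "*(__u32 *)rec" access above relies on insn_off being the first
 * member of both record types defined in uapi/linux/bpf.h:
 *
 *	struct bpf_func_info { __u32 insn_off; __u32 type_id; };
 *	struct bpf_line_info { __u32 insn_off; __u32 file_name_off;
 *			       __u32 line_off; __u32 line_col; };
 *
 * Worked example of the offset adjustment (hypothetical numbers): a
 * subprog starts at byte 512 of its ELF section (sec_insn_off = 64) and
 * was appended at instruction 100 of the main prog (sub_insn_off = 100).
 * A record with a byte offset of 528 becomes
 *
 *	528 / BPF_INSN_SZ + (100 - 64) = 66 + 36 = 102
 *
 * i.e. the third instruction of the appended subprog copy.
 */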
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6108) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6109) reloc_prog_func_and_line_info(const struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6110) 			      struct bpf_program *main_prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6111) 			      const struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6113) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6115) 	/* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6116) 	 * support func/line info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6117) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6118) 	if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6119) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6121) 	/* only attempt func info relocation if main program's func_info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6122) 	 * relocation was successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6123) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6124) 	if (main_prog != prog && !main_prog->func_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6125) 		goto line_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6127) 	err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6128) 				       &main_prog->func_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6129) 				       &main_prog->func_info_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6130) 				       &main_prog->func_info_rec_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6131) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6132) 		if (err != -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6133) 			pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6134) 				prog->name, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6135) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6136) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6137) 		if (main_prog->func_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6138) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6139) 			 * Some info has already been found, but there was a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6140) 			 * problem with the last btf_ext reloc, so error out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6141) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6142) 			pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6143) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6144) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6145) 		/* There was a problem loading the very first info. Ignore the rest. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6146) 		pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6147) 			prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6148) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6150) line_info:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6151) 	/* don't relocate line info if main program's relocation failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6152) 	if (main_prog != prog && !main_prog->line_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6153) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6155) 	err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6156) 				       &main_prog->line_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6157) 				       &main_prog->line_info_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6158) 				       &main_prog->line_info_rec_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6159) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6160) 		if (err != -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6161) 			pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6162) 				prog->name, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6163) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6164) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6165) 		if (main_prog->line_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6166) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6167) 			 * Some info has already been found, but there was a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6168) 			 * problem with the last btf_ext reloc, so error out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6169) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6170) 			pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6171) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6172) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6173) 		/* There was a problem loading the very first info. Ignore the rest. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6174) 		pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6175) 			prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6176) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6177) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6180) static int cmp_relo_by_insn_idx(const void *key, const void *elem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6182) 	size_t insn_idx = *(const size_t *)key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6183) 	const struct reloc_desc *relo = elem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6185) 	if (insn_idx == relo->insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6186) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6187) 	return insn_idx < relo->insn_idx ? -1 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6190) static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6192) 	return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6193) 		       sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6194) }
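
/*
 * find_prog_insn_relo() assumes prog->reloc_desc is sorted by insn_idx;
 * bpf_object__collect_relos() below guarantees this by qsort()'ing the
 * array with cmp_relocs(). A lookup sketch (instruction index is
 * hypothetical):
 *
 *	struct reloc_desc *relo = find_prog_insn_relo(prog, 5);
 *
 *	// NULL means no relocation touches instruction #5, which for a
 *	// call insn implies a static function in the same ELF section.
 */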
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6196) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6197) bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6198) 		       struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6200) 	size_t sub_insn_idx, insn_idx, new_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6201) 	struct bpf_program *subprog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6202) 	struct bpf_insn *insns, *insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6203) 	struct reloc_desc *relo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6204) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6206) 	err = reloc_prog_func_and_line_info(obj, main_prog, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6207) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6208) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6210) 	for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6211) 		insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6212) 		if (!insn_is_subprog_call(insn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6213) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6215) 		relo = find_prog_insn_relo(prog, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6216) 		if (relo && relo->type != RELO_CALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6217) 			pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6218) 				prog->name, insn_idx, relo->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6219) 			return -LIBBPF_ERRNO__RELOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6220) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6221) 		if (relo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6222) 			/* sub-program instruction index is a combination of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6223) 			 * an offset of a symbol pointed to by relocation and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6224) 			 * call instruction's imm field; for global functions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6225) 			 * call always has imm = -1, but for static functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6226) 			 * relocation is against STT_SECTION and insn->imm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6227) 			 * points to a start of a static function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6228) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6229) 			sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6230) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6231) 			/* if subprogram call is to a static function within
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6232) 			 * the same ELF section, there won't be any relocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6233) 			 * emitted, but it also means there is no additional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6234) 			 * offset necessary, insns->imm is relative to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6235) 			 * instruction's original position within the section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6236) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6237) 			sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6238) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6240) 		/* we enforce that sub-programs should be in .text section */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6241) 		subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6242) 		if (!subprog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6243) 			pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6244) 				prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6245) 			return -LIBBPF_ERRNO__RELOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6246) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6248) 		/* if it's the first call instruction calling into this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6249) 		 * subprogram (meaning this subprog hasn't been processed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6250) 		 * yet) within the context of current main program:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6251) 		 *   - append it at the end of main program's instruction block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6252) 		 *   - process it recursively, while current program is put on hold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6253) 		 *   - if that subprogram calls some other not yet processed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6254) 		 *   subprogram, the same thing will happen recursively until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6255) 		 *   there are no more unprocessed subprograms left to append
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6256) 		 *   and relocate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6257) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6258) 		if (subprog->sub_insn_off == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6259) 			subprog->sub_insn_off = main_prog->insns_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6261) 			new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6262) 			insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6263) 			if (!insns) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6264) 				pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6265) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6266) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6267) 			main_prog->insns = insns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6268) 			main_prog->insns_cnt = new_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6270) 			memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6271) 			       subprog->insns_cnt * sizeof(*insns));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6273) 			pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6274) 				 main_prog->name, subprog->insns_cnt, subprog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6276) 			err = bpf_object__reloc_code(obj, main_prog, subprog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6277) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6278) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6279) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6281) 		/* main_prog->insns memory could have been re-allocated, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6282) 		 * calculate pointer again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6283) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6284) 		insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6285) 		/* calculate correct instruction position within current main
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6286) 		 * prog; each main prog can have a different set of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6287) 		 * subprograms appended (potentially in different order as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6288) 		 * well), so position of any subprog can be different for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6289) 		 * different main programs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6290) 		insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6292) 		if (relo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6293) 			relo->processed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6295) 		pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6296) 			 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6297) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6299) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6300) }
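
/*
 * Worked example of the imm computation above (hypothetical sizes): the
 * main program has 20 instructions and the subprog was just appended, so
 * subprog->sub_insn_off == 20. A call at absolute index 7 within the main
 * prog (prog->sub_insn_off == 0, insn_idx == 7) gets
 *
 *	insn->imm = 20 - (0 + 7) - 1 = 12
 *
 * which matches the BPF pseudo-call convention: the target instruction is
 * call_idx + imm + 1 = 7 + 12 + 1 = 20, the first instruction of the
 * appended subprog.
 */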
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6302) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6303)  * Relocate sub-program calls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6304)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6305)  * Algorithm operates as follows. Each entry-point BPF program (referred to as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6306)  * main prog) is processed separately. Each subprog (a non-entry function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6307)  * that can be called from either entry progs or other subprogs) gets its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6308)  * sub_insn_off reset to zero. This serves as an indicator that this subprogram
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6309)  * hasn't yet been appended and relocated within the current main prog. Once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6310)  * it's relocated, sub_insn_off will point at the position within current main prog
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6311)  * where given subprog was appended. This will further be used to relocate all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6312)  * the call instructions jumping into this subprog.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6313)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6314)  * We start with main program and process all call instructions. If the call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6315)  * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6316)  * is zero), subprog instructions are appended at the end of main program's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6317)  * instruction array. Then main program is "put on hold" while we recursively
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6318)  * process newly appended subprogram. If that subprogram calls into another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6319)  * subprogram that hasn't been appended, new subprogram is appended again to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6320)  * the *main* prog's instructions (subprog's instructions are always left
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6321)  * untouched, as they need to be in unmodified state for subsequent main progs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6322)  * and subprog instructions are always sent only as part of a main prog) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6323)  * the process continues recursively. Once all the subprogs called from a main
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6324)  * prog or any of its subprogs are appended (and relocated), all their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6325)  * positions within finalized instructions array are known, so it's easy to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6326)  * rewrite call instructions with correct relative offsets, corresponding to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6327)  * desired target subprog.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6328)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6329)  * It's important to realize that some subprogs might not be called from a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6330)  * given main prog or any of its called/used subprogs. Those will keep their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6331)  * subprog->sub_insn_off as zero at all times and won't be appended to current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6332)  * main prog and won't be relocated within the context of current main prog.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6333)  * They might still be used from other main progs later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6334)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6335)  * Visually this process can be shown as below. Suppose we have two main
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6336)  * programs mainA and mainB and BPF object contains three subprogs: subA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6337)  * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6338)  * subC both call subB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6339)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6340)  *        +--------+ +-------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6341)  *        |        v v       |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6342)  *     +--+---+ +--+-+-+ +---+--+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6343)  *     | subA | | subB | | subC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6344)  *     +--+---+ +------+ +---+--+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6345)  *        ^                  ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6346)  *        |                  |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6347)  *    +---+-------+   +------+----+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6348)  *    |   mainA   |   |   mainB   |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6349)  *    +-----------+   +-----------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6350)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6351)  * We'll start relocating mainA, will find subA, append it and start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6352)  * processing subA recursively:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6353)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6354)  *    +-----------+------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6355)  *    |   mainA   | subA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6356)  *    +-----------+------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6357)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6358)  * At this point we notice that subB is used from subA, so we append it and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6359)  * relocate (there are no further subcalls from subB):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6360)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6361)  *    +-----------+------+------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6362)  *    |   mainA   | subA | subB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6363)  *    +-----------+------+------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6364)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6365)  * At this point, we relocate subA calls, then go one level up and finish with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6366)  * relocating mainA calls. mainA is done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6367)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6368)  * For mainB the process is similar but results in a different order. We start with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6369)  * mainB and skip subA and subB, as mainB never calls them (at least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6370)  * directly), but we see subC is needed, so we append and start processing it:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6371)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6372)  *    +-----------+------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6373)  *    |   mainB   | subC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6374)  *    +-----------+------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6375)  * Now we see subC needs subB, so we go back to it, append and relocate it:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6376)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6377)  *    +-----------+------+------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6378)  *    |   mainB   | subC | subB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6379)  *    +-----------+------+------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6380)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6381)  * At this point we unwind recursion, relocate calls in subC, then in mainB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6382)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6383) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6384) bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6386) 	struct bpf_program *subprog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6387) 	int i, j, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6389) 	/* mark all subprogs as not relocated (yet) within the context of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6390) 	 * current main program
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6391) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6392) 	for (i = 0; i < obj->nr_programs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6393) 		subprog = &obj->programs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6394) 		if (!prog_is_subprog(obj, subprog))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6395) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6397) 		subprog->sub_insn_off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6398) 		for (j = 0; j < subprog->nr_reloc; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6399) 			if (subprog->reloc_desc[j].type == RELO_CALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6400) 				subprog->reloc_desc[j].processed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6401) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6403) 	err = bpf_object__reloc_code(obj, prog, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6404) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6405) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6408) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6409) }
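
/*
 * In BPF C source terms, the mainA/subA/subB case from the comment above
 * could come from something like this (hypothetical program, assumes the
 * SEC()/__noinline macros from tools/lib/bpf/bpf_helpers.h):
 *
 *	static __noinline int subB(int x) { return x + 1; }
 *	static __noinline int subA(int x) { return subB(x) * 2; }
 *
 *	SEC("raw_tp/sys_enter")
 *	int mainA(void *ctx)
 *	{
 *		return subA(1);
 *	}
 *
 * After bpf_object__relocate_calls(obj, <mainA's bpf_program>), mainA's
 * insns array holds mainA's own code followed by copies of subA and subB,
 * with every call imm rewritten to a relative offset within that array.
 */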
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6411) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6412) bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6414) 	struct bpf_program *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6415) 	size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6416) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6418) 	if (obj->btf_ext) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6419) 		err = bpf_object__relocate_core(obj, targ_btf_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6420) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6421) 			pr_warn("failed to perform CO-RE relocations: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6422) 				err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6423) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6424) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6425) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6426) 	/* relocate data references first for all programs and sub-programs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6427) 	 * as they don't change relative to code locations, so subsequent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6428) 	 * subprogram processing won't need to re-calculate any of them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6429) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6430) 	for (i = 0; i < obj->nr_programs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6431) 		prog = &obj->programs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6432) 		err = bpf_object__relocate_data(obj, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6433) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6434) 			pr_warn("prog '%s': failed to relocate data references: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6435) 				prog->name, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6436) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6437) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6438) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6439) 	/* now relocate subprogram calls and append used subprograms to main
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6440) 	 * programs; each copy of subprogram code needs to be relocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6441) 	 * differently for each main program, because its code location might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6442) 	 * have changed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6443) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6444) 	for (i = 0; i < obj->nr_programs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6445) 		prog = &obj->programs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6446) 		/* sub-program's sub-calls are relocated within the context of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6447) 		 * its main program only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6448) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6449) 		if (prog_is_subprog(obj, prog))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6450) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6452) 		err = bpf_object__relocate_calls(obj, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6453) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6454) 			pr_warn("prog '%s': failed to relocate calls: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6455) 				prog->name, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6456) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6457) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6458) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6459) 	/* free up relocation descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6460) 	for (i = 0; i < obj->nr_programs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6461) 		prog = &obj->programs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6462) 		zfree(&prog->reloc_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6463) 		prog->nr_reloc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6464) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6465) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6466) }
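
/*
 * From the API user's point of view all of the relocation above happens
 * inside the load step. A typical sequence (sketch, error handling
 * elided; "prog.bpf.o" is a hypothetical object file):
 *
 *	struct bpf_object *obj = bpf_object__open("prog.bpf.o");
 *
 *	// CO-RE, data and call relocations run as part of load
 *	int err = bpf_object__load(obj);
 *
 * Passing a custom target BTF (targ_btf_path) is possible through
 * bpf_object__load_xattr() and struct bpf_object_load_attr's
 * target_btf_path field.
 */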
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6468) static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6469) 					    GElf_Shdr *shdr, Elf_Data *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6471) static int bpf_object__collect_map_relos(struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6472) 					 GElf_Shdr *shdr, Elf_Data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6474) 	const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6475) 	int i, j, nrels, new_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6476) 	const struct btf_var_secinfo *vi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6477) 	const struct btf_type *sec, *var, *def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6478) 	struct bpf_map *map = NULL, *targ_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6479) 	const struct btf_member *member;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6480) 	const char *name, *mname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6481) 	Elf_Data *symbols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6482) 	unsigned int moff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6483) 	GElf_Sym sym;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6484) 	GElf_Rel rel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6485) 	void *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6487) 	if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6488) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6489) 	sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6490) 	if (!sec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6491) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6493) 	symbols = obj->efile.symbols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6494) 	nrels = shdr->sh_size / shdr->sh_entsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6495) 	for (i = 0; i < nrels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6496) 		if (!gelf_getrel(data, i, &rel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6497) 			pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6498) 			return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6499) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6500) 		if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6501) 			pr_warn(".maps relo #%d: symbol %zx not found\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6502) 				i, (size_t)GELF_R_SYM(rel.r_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6503) 			return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6504) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6505) 		name = elf_sym_str(obj, sym.st_name) ?: "<?>";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6506) 		if (sym.st_shndx != obj->efile.btf_maps_shndx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6507) 			pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6508) 				i, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6509) 			return -LIBBPF_ERRNO__RELOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6510) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6512) 		pr_debug(".maps relo #%d: for %zd value %zd rel.r_offset %zu name %d ('%s')\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6513) 			 i, (ssize_t)(rel.r_info >> 32), (size_t)sym.st_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6514) 			 (size_t)rel.r_offset, sym.st_name, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6516) 		for (j = 0; j < obj->nr_maps; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6517) 			map = &obj->maps[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6518) 			if (map->sec_idx != obj->efile.btf_maps_shndx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6519) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6521) 			vi = btf_var_secinfos(sec) + map->btf_var_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6522) 			if (vi->offset <= rel.r_offset &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6523) 			    rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6524) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6525) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6526) 		if (j == obj->nr_maps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6527) 			pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6528) 				i, name, (size_t)rel.r_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6529) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6530) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6532) 		if (!bpf_map_type__is_map_in_map(map->def.type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6533) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6534) 		if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6535) 		    map->def.key_size != sizeof(int)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6536) 			pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6537) 				i, map->name, sizeof(int));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6538) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6539) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6541) 		targ_map = bpf_object__find_map_by_name(obj, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6542) 		if (!targ_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6543) 			return -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6545) 		var = btf__type_by_id(obj->btf, vi->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6546) 		def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6547) 		if (btf_vlen(def) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6548) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6549) 		member = btf_members(def) + btf_vlen(def) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6550) 		mname = btf__name_by_offset(obj->btf, member->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6551) 		if (strcmp(mname, "values"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6552) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6554) 		moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6555) 		if (rel.r_offset - vi->offset < moff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6556) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6558) 		moff = rel.r_offset - vi->offset - moff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6559) 		/* here we use BPF pointer size, which is always 64 bit, as we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6560) 		 * are parsing an ELF that was built for the BPF target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6561) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6562) 		if (moff % bpf_ptr_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6563) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6564) 		moff /= bpf_ptr_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6565) 		if (moff >= map->init_slots_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6566) 			new_sz = moff + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6567) 			tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6568) 			if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6569) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6570) 			map->init_slots = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6571) 			memset(map->init_slots + map->init_slots_sz, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6572) 			       (new_sz - map->init_slots_sz) * host_ptr_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6573) 			map->init_slots_sz = new_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6574) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6575) 		map->init_slots[moff] = targ_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6577) 		pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6578) 			 i, map->name, moff, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6579) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6581) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6582) }
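
/*
 * The relocations parsed above come from BTF-defined map-in-map
 * declarations on the BPF side. A sketch of such a declaration
 * (hypothetical map names, assumes the __uint/__type/__array macros from
 * tools/lib/bpf/bpf_helpers.h):
 *
 *	struct inner_map {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, int);
 *	} inner_map1 SEC(".maps");
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 4);
 *		__type(key, int);
 *		__array(values, struct inner_map);
 *	} outer_map SEC(".maps") = {
 *		.values = { [0] = &inner_map1 },
 *	};
 *
 * The "&inner_map1" initializer is emitted as an ELF relocation against
 * the .maps section; the loop above resolves it into the outer map's
 * init_slots[0].
 */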
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6584) static int cmp_relocs(const void *_a, const void *_b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6586) 	const struct reloc_desc *a = _a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6587) 	const struct reloc_desc *b = _b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6589) 	if (a->insn_idx != b->insn_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6590) 		return a->insn_idx < b->insn_idx ? -1 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6592) 	/* no two relocations should have the same insn_idx, but ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6593) 	if (a->type != b->type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6594) 		return a->type < b->type ? -1 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6596) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6599) static int bpf_object__collect_relos(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6601) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6603) 	for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6604) 		GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6605) 		Elf_Data *data = obj->efile.reloc_sects[i].data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6606) 		int idx = shdr->sh_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6608) 		if (shdr->sh_type != SHT_REL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6609) 			pr_warn("internal error at %d\n", __LINE__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6610) 			return -LIBBPF_ERRNO__INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6611) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6613) 		if (idx == obj->efile.st_ops_shndx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6614) 			err = bpf_object__collect_st_ops_relos(obj, shdr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6615) 		else if (idx == obj->efile.btf_maps_shndx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6616) 			err = bpf_object__collect_map_relos(obj, shdr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6617) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6618) 			err = bpf_object__collect_prog_relos(obj, shdr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6619) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6620) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6621) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6623) 	for (i = 0; i < obj->nr_programs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6624) 		struct bpf_program *p = &obj->programs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6626) 		if (!p->nr_reloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6627) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6629) 		qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6630) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6631) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6634) static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6636) 	if (BPF_CLASS(insn->code) == BPF_JMP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6637) 	    BPF_OP(insn->code) == BPF_CALL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6638) 	    BPF_SRC(insn->code) == BPF_K &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6639) 	    insn->src_reg == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6640) 	    insn->dst_reg == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6641) 		*func_id = insn->imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6642) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6643) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6644) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6645) }
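
/*
 * For reference, a helper call in the loaded ELF has exactly the shape
 * matched above (sketch, imm value picked arbitrarily):
 *
 *	struct bpf_insn call = {
 *		.code    = BPF_JMP | BPF_CALL, // BPF_SRC() == BPF_K (0)
 *		.dst_reg = 0,
 *		.src_reg = 0,                  // 0, i.e. not BPF_PSEUDO_CALL
 *		.off     = 0,
 *		.imm     = BPF_FUNC_probe_read_kernel,
 *	};
 *
 * Sub-program calls instead carry src_reg == BPF_PSEUDO_CALL and are
 * therefore skipped by this check.
 */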
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6647) static int bpf_object__sanitize_prog(struct bpf_object* obj, struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6649) 	struct bpf_insn *insn = prog->insns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6650) 	enum bpf_func_id func_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6651) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6653) 	for (i = 0; i < prog->insns_cnt; i++, insn++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6654) 		if (!insn_is_helper_call(insn, &func_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6655) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6657) 		/* on kernels that don't yet support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6658) 		 * bpf_probe_read_{kernel,user}[_str] helpers, fall back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6659) 		 * to bpf_probe_read() which works well for old kernels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6660) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6661) 		switch (func_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6662) 		case BPF_FUNC_probe_read_kernel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6663) 		case BPF_FUNC_probe_read_user:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6664) 			if (!kernel_supports(FEAT_PROBE_READ_KERN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6665) 				insn->imm = BPF_FUNC_probe_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6666) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6667) 		case BPF_FUNC_probe_read_kernel_str:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6668) 		case BPF_FUNC_probe_read_user_str:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6669) 			if (!kernel_supports(FEAT_PROBE_READ_KERN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6670) 				insn->imm = BPF_FUNC_probe_read_str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6671) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6672) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6673) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6674) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6675) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6676) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6677) }
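
/*
 * The imm rewrite above is safe because the fallback helpers take the
 * same arguments in the same registers; per the UAPI helper descriptions:
 *
 *	long bpf_probe_read_kernel(void *dst, __u32 size, const void *unsafe_ptr);
 *	long bpf_probe_read(void *dst, __u32 size, const void *unsafe_ptr);
 *
 * so patching only the helper id leaves r1-r3 set up correctly, and the
 * same holds for the *_str variants.
 */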
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6679) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6680) load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6681) 	     char *license, __u32 kern_version, int *pfd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6683) 	struct bpf_load_program_attr load_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6684) 	char *cp, errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6685) 	size_t log_buf_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6686) 	char *log_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6687) 	int btf_fd, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6689) 	if (!insns || !insns_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6690) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6692) 	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6693) 	load_attr.prog_type = prog->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6694) 	/* old kernels might not support specifying expected_attach_type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6695) 	if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6696) 	    prog->sec_def->is_exp_attach_type_optional)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6697) 		load_attr.expected_attach_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6698) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6699) 		load_attr.expected_attach_type = prog->expected_attach_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6700) 	if (kernel_supports(FEAT_PROG_NAME))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6701) 		load_attr.name = prog->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6702) 	load_attr.insns = insns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6703) 	load_attr.insns_cnt = insns_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6704) 	load_attr.license = license;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6705) 	if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6706) 	    prog->type == BPF_PROG_TYPE_LSM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6707) 		load_attr.attach_btf_id = prog->attach_btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6708) 	} else if (prog->type == BPF_PROG_TYPE_TRACING ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6709) 		   prog->type == BPF_PROG_TYPE_EXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6710) 		load_attr.attach_prog_fd = prog->attach_prog_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6711) 		load_attr.attach_btf_id = prog->attach_btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6712) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6713) 		load_attr.kern_version = kern_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6714) 		load_attr.prog_ifindex = prog->prog_ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6715) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6716) 	/* specify func_info/line_info only if kernel supports them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6717) 	btf_fd = bpf_object__btf_fd(prog->obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6718) 	if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6719) 		load_attr.prog_btf_fd = btf_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6720) 		load_attr.func_info = prog->func_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6721) 		load_attr.func_info_rec_size = prog->func_info_rec_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6722) 		load_attr.func_info_cnt = prog->func_info_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6723) 		load_attr.line_info = prog->line_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6724) 		load_attr.line_info_rec_size = prog->line_info_rec_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6725) 		load_attr.line_info_cnt = prog->line_info_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6726) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6727) 	load_attr.log_level = prog->log_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6728) 	load_attr.prog_flags = prog->prog_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6730) retry_load:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6731) 	if (log_buf_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6732) 		log_buf = malloc(log_buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6733) 		if (!log_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6734) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6736) 		*log_buf = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6737) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6739) 	ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6741) 	if (ret >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6742) 		if (log_buf && load_attr.log_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6743) 			pr_debug("verifier log:\n%s", log_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6745) 		if (prog->obj->rodata_map_idx >= 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6746) 		    kernel_supports(FEAT_PROG_BIND_MAP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6747) 			struct bpf_map *rodata_map =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6748) 				&prog->obj->maps[prog->obj->rodata_map_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6750) 			if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6751) 				cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6752) 				pr_warn("prog '%s': failed to bind .rodata map: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6753) 					prog->name, cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6754) 				/* Don't fail hard if we can't bind rodata. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6755) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6756) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6758) 		*pfd = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6759) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6760) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6763) 	if (!log_buf || errno == ENOSPC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6764) 		log_buf_size = max((size_t)BPF_LOG_BUF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6765) 				   log_buf_size << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6767) 		free(log_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6768) 		goto retry_load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6769) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6770) 	ret = errno ? -errno : -LIBBPF_ERRNO__LOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6771) 	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6772) 	pr_warn("load bpf program failed: %s\n", cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6773) 	pr_perm_msg(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6775) 	if (log_buf && log_buf[0] != '\0') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6776) 		ret = -LIBBPF_ERRNO__VERIFY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6777) 		pr_warn("-- BEGIN DUMP LOG --\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6778) 		pr_warn("\n%s\n", log_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6779) 		pr_warn("-- END DUMP LOG --\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6780) 	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6781) 		pr_warn("Program too large (%zu insns), at most %d insns\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6782) 			load_attr.insns_cnt, BPF_MAXINSNS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6783) 		ret = -LIBBPF_ERRNO__PROG2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6784) 	} else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6785) 		/* Wrong program type? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6786) 		int fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6788) 		load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6789) 		load_attr.expected_attach_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6790) 		fd = bpf_load_program_xattr(&load_attr, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6791) 		if (fd >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6792) 			close(fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6793) 			ret = -LIBBPF_ERRNO__PROGTYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6794) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6795) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6796) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6798) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6799) 	free(log_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6800) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6803) static int libbpf_find_attach_btf_id(struct bpf_program *prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6804) 
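/*
 * Load all instances of a BPF program into the kernel.  Programs without a
 * preprocessor have exactly one instance; with a preprocessor, each instance
 * is generated from the original instructions and loaded separately.  File
 * descriptors of loaded instances end up in prog->instances.fds, and the
 * original instructions are freed whether loading succeeded or not.
 */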
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6805) int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6807) 	int err = 0, fd, i, btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6809) 	if (prog->obj->loaded) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6810) 		pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6811) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6812) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6814) 	if ((prog->type == BPF_PROG_TYPE_TRACING ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6815) 	     prog->type == BPF_PROG_TYPE_LSM ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6816) 	     prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6817) 		btf_id = libbpf_find_attach_btf_id(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6818) 		if (btf_id <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6819) 			return btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6820) 		prog->attach_btf_id = btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6821) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6823) 	if (prog->instances.nr < 0 || !prog->instances.fds) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6824) 		if (prog->preprocessor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6825) 			pr_warn("Internal error: can't load program '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6826) 				prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6827) 			return -LIBBPF_ERRNO__INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6828) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6830) 		prog->instances.fds = malloc(sizeof(int));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6831) 		if (!prog->instances.fds) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6832) 			pr_warn("Not enough memory for BPF fds\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6833) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6834) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6835) 		prog->instances.nr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6836) 		prog->instances.fds[0] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6837) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6839) 	if (!prog->preprocessor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6840) 		if (prog->instances.nr != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6841) 			pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6842) 				prog->name, prog->instances.nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6843) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6844) 		err = load_program(prog, prog->insns, prog->insns_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6845) 				   license, kern_ver, &fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6846) 		if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6847) 			prog->instances.fds[0] = fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6848) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6849) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6851) 	for (i = 0; i < prog->instances.nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6852) 		struct bpf_prog_prep_result result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6853) 		bpf_program_prep_t preprocessor = prog->preprocessor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6855) 		memset(&result, 0, sizeof(result));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6856) 		err = preprocessor(prog, i, prog->insns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6857) 				   prog->insns_cnt, &result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6858) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6859) 			pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6860) 				i, prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6861) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6862) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6864) 		if (!result.new_insn_ptr || !result.new_insn_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6865) 			pr_debug("Skip loading the %dth instance of program '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6866) 				 i, prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6867) 			prog->instances.fds[i] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6868) 			if (result.pfd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6869) 				*result.pfd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6870) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6871) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6873) 		err = load_program(prog, result.new_insn_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6874) 				   result.new_insn_cnt, license, kern_ver, &fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6875) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6876) 			pr_warn("Loading the %dth instance of program '%s' failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6877) 				i, prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6878) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6879) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6881) 		if (result.pfd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6882) 			*result.pfd = fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6883) 		prog->instances.fds[i] = fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6884) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6885) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6886) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6887) 		pr_warn("failed to load program '%s'\n", prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6888) 	zfree(&prog->insns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6889) 	prog->insns_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6890) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6892) 
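/*
 * Load every program in the object: first sanitize all programs for the
 * running kernel, then load each non-subprogram whose loading is enabled,
 * OR-ing the requested log_level into the program's own setting.
 */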
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6893) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6894) bpf_object__load_progs(struct bpf_object *obj, int log_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6896) 	struct bpf_program *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6897) 	size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6898) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6900) 	for (i = 0; i < obj->nr_programs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6901) 		prog = &obj->programs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6902) 		err = bpf_object__sanitize_prog(obj, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6903) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6904) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6905) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6907) 	for (i = 0; i < obj->nr_programs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6908) 		prog = &obj->programs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6909) 		if (prog_is_subprog(obj, prog))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6910) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6911) 		if (!prog->load) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6912) 			pr_debug("prog '%s': skipped loading\n", prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6913) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6914) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6915) 		prog->log_level |= log_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6916) 		err = bpf_program__load(prog, obj->license, obj->kern_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6917) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6918) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6919) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6920) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6923) static const struct bpf_sec_def *find_sec_def(const char *sec_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6924) 
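/*
 * Common open path for file- and memory-backed objects: parse the ELF,
 * collect maps, programs, externs and relocations, then derive each
 * program's type and expected attach type from its section name (an
 * unrecognized section is not an error; the caller may set the type
 * manually later).
 */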
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6925) static struct bpf_object *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6926) __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6927) 		   const struct bpf_object_open_opts *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6929) 	const char *obj_name, *kconfig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6930) 	struct bpf_program *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6931) 	struct bpf_object *obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6932) 	char tmp_name[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6933) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6935) 	if (elf_version(EV_CURRENT) == EV_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6936) 		pr_warn("failed to init libelf for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6937) 			path ? : "(mem buf)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6938) 		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6939) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6941) 	if (!OPTS_VALID(opts, bpf_object_open_opts))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6942) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6944) 	obj_name = OPTS_GET(opts, object_name, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6945) 	if (obj_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6946) 		if (!obj_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6947) 			snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6948) 				 (unsigned long)obj_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6949) 				 (unsigned long)obj_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6950) 			obj_name = tmp_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6951) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6952) 		path = obj_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6953) 		pr_debug("loading object '%s' from buffer\n", obj_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6954) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6956) 	obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6957) 	if (IS_ERR(obj))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6958) 		return obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6960) 	kconfig = OPTS_GET(opts, kconfig, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6961) 	if (kconfig) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6962) 		obj->kconfig = strdup(kconfig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6963) 		if (!obj->kconfig) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6964) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6965) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6966) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6967) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6969) 	err = bpf_object__elf_init(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6970) 	err = err ? : bpf_object__check_endianness(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6971) 	err = err ? : bpf_object__elf_collect(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6972) 	err = err ? : bpf_object__collect_externs(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6973) 	err = err ? : bpf_object__finalize_btf(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6974) 	err = err ? : bpf_object__init_maps(obj, opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6975) 	err = err ? : bpf_object__collect_relos(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6976) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6977) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6978) 	bpf_object__elf_finish(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6980) 	bpf_object__for_each_program(prog, obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6981) 		prog->sec_def = find_sec_def(prog->sec_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6982) 		if (!prog->sec_def)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6983) 			/* couldn't guess, but user might manually specify */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6984) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6986) 		if (prog->sec_def->is_sleepable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6987) 			prog->prog_flags |= BPF_F_SLEEPABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6988) 		bpf_program__set_type(prog, prog->sec_def->prog_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6989) 		bpf_program__set_expected_attach_type(prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6990) 				prog->sec_def->expected_attach_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6992) 		if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6993) 		    prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6994) 			prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6995) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6997) 	return obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6998) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6999) 	bpf_object__close(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7000) 	return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7003) static struct bpf_object *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7004) __bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7006) 	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7007) 		.relaxed_maps = flags & MAPS_RELAX_COMPAT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7008) 	);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7010) 	/* param validation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7011) 	if (!attr->file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7012) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7014) 	pr_debug("loading %s\n", attr->file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7015) 	return __bpf_object__open(attr->file, NULL, 0, &opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7018) struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7020) 	return __bpf_object__open_xattr(attr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7023) struct bpf_object *bpf_object__open(const char *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7025) 	struct bpf_object_open_attr attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7026) 		.file		= path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7027) 		.prog_type	= BPF_PROG_TYPE_UNSPEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7028) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7030) 	return bpf_object__open_xattr(&attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7033) struct bpf_object *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7034) bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7036) 	if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7037) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7039) 	pr_debug("loading %s\n", path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7041) 	return __bpf_object__open(path, NULL, 0, opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7044) struct bpf_object *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7045) bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7046) 		     const struct bpf_object_open_opts *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7048) 	if (!obj_buf || obj_buf_sz == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7049) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7051) 	return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7054) struct bpf_object *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7055) bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7056) 			const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7058) 	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7059) 		.object_name = name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7060) 		/* wrong default, but backwards-compatible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7061) 		.relaxed_maps = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7062) 	);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7064) 	/* returning NULL is wrong, but backwards-compatible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7065) 	if (!obj_buf || obj_buf_sz == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7066) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7068) 	return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7070) 
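/*
 * Release kernel resources held by the object: close all map fds, free
 * kernel-side struct_ops data and unload every program.
 */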
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7071) int bpf_object__unload(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7073) 	size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7075) 	if (!obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7076) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7078) 	for (i = 0; i < obj->nr_maps; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7079) 		zclose(obj->maps[i].fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7080) 		if (obj->maps[i].st_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7081) 			zfree(&obj->maps[i].st_ops->kern_vdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7082) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7084) 	for (i = 0; i < obj->nr_programs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7085) 		bpf_program__unload(&obj->programs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7087) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7089) 
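/*
 * Adjust internal (global data) maps to the running kernel: global data
 * support is mandatory, and the BPF_F_MMAPABLE flag is toggled off (internal
 * maps request it by default) when the kernel cannot mmap array maps.
 */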
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7090) static int bpf_object__sanitize_maps(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7092) 	struct bpf_map *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7094) 	bpf_object__for_each_map(m, obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7095) 		if (!bpf_map__is_internal(m))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7096) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7097) 		if (!kernel_supports(FEAT_GLOBAL_DATA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7098) 			pr_warn("kernel doesn't support global data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7099) 			return -ENOTSUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7100) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7101) 		if (!kernel_supports(FEAT_ARRAY_MMAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7102) 			m->def.map_flags ^= BPF_F_MMAPABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7103) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7105) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7107) 
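/*
 * Resolve ksym externs by scanning /proc/kallsyms: every matching symbol gets
 * its kernel address recorded; the same extern resolving to two different
 * addresses is an error.
 */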
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7108) static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7110) 	char sym_type, sym_name[500];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7111) 	unsigned long long sym_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7112) 	struct extern_desc *ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7113) 	int ret, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7114) 	FILE *f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7116) 	f = fopen("/proc/kallsyms", "r");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7117) 	if (!f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7118) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7119) 		pr_warn("failed to open /proc/kallsyms: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7120) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7121) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7123) 	while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7124) 		ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7125) 			     &sym_addr, &sym_type, sym_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7126) 		if (ret == EOF && feof(f))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7127) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7128) 		if (ret != 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7129) 			pr_warn("failed to read kallsyms entry: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7130) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7131) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7132) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7134) 		ext = find_extern_by_name(obj, sym_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7135) 		if (!ext || ext->type != EXT_KSYM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7136) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7138) 		if (ext->is_set && ext->ksym.addr != sym_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7139) 			pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7140) 				sym_name, ext->ksym.addr, sym_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7141) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7142) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7143) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7144) 		if (!ext->is_set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7145) 			ext->is_set = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7146) 			ext->ksym.addr = sym_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7147) 			pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7148) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7149) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7151) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7152) 	fclose(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7153) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7155) 
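/*
 * Resolve typed ksym externs against vmlinux BTF: look up the variable by
 * name, verify that the local and kernel types are compatible, and record
 * the variable's vmlinux BTF id.
 */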
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7156) static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7158) 	struct extern_desc *ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7159) 	int i, id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7161) 	for (i = 0; i < obj->nr_extern; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7162) 		const struct btf_type *targ_var, *targ_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7163) 		__u32 targ_type_id, local_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7164) 		const char *targ_var_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7165) 		int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7167) 		ext = &obj->externs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7168) 		if (ext->type != EXT_KSYM || !ext->ksym.type_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7169) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7171) 		id = btf__find_by_name_kind(obj->btf_vmlinux, ext->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7172) 					    BTF_KIND_VAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7173) 		if (id <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7174) 			pr_warn("extern (ksym) '%s': failed to find BTF ID in vmlinux BTF.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7175) 				ext->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7176) 			return -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7177) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7179) 		/* find local type_id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7180) 		local_type_id = ext->ksym.type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7182) 		/* find target type_id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7183) 		targ_var = btf__type_by_id(obj->btf_vmlinux, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7184) 		targ_var_name = btf__name_by_offset(obj->btf_vmlinux,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7185) 						    targ_var->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7186) 		targ_type = skip_mods_and_typedefs(obj->btf_vmlinux,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7187) 						   targ_var->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7188) 						   &targ_type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7190) 		ret = bpf_core_types_are_compat(obj->btf, local_type_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7191) 						obj->btf_vmlinux, targ_type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7192) 		if (ret <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7193) 			const struct btf_type *local_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7194) 			const char *targ_name, *local_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7196) 			local_type = btf__type_by_id(obj->btf, local_type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7197) 			local_name = btf__name_by_offset(obj->btf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7198) 							 local_type->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7199) 			targ_name = btf__name_by_offset(obj->btf_vmlinux,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7200) 							targ_type->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7202) 			pr_warn("extern (ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7203) 				ext->name, local_type_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7204) 				btf_kind_str(local_type), local_name, targ_type_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7205) 				btf_kind_str(targ_type), targ_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7206) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7207) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7209) 		ext->is_set = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7210) 		ext->ksym.vmlinux_btf_id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7211) 		pr_debug("extern (ksym) '%s': resolved to [%d] %s %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7212) 			 ext->name, id, btf_kind_str(targ_var), targ_var_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7213) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7214) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7216) 
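/*
 * Resolve all externs used by the object: LINUX_KERNEL_VERSION comes from the
 * running kernel, CONFIG_* values from the caller-supplied kconfig string
 * and/or the kernel config, and ksym externs from /proc/kallsyms or vmlinux
 * BTF.  An unresolved strong extern is an error; weak externs default to zero.
 */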
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7217) static int bpf_object__resolve_externs(struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7218) 				       const char *extra_kconfig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7220) 	bool need_config = false, need_kallsyms = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7221) 	bool need_vmlinux_btf = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7222) 	struct extern_desc *ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7223) 	void *kcfg_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7224) 	int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7226) 	if (obj->nr_extern == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7227) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7229) 	if (obj->kconfig_map_idx >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7230) 		kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7232) 	for (i = 0; i < obj->nr_extern; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7233) 		ext = &obj->externs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7235) 		if (ext->type == EXT_KCFG &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7236) 		    strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7237) 			void *ext_val = kcfg_data + ext->kcfg.data_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7238) 			__u32 kver = get_kernel_version();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7240) 			if (!kver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7241) 				pr_warn("failed to get kernel version\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7242) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7243) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7244) 			err = set_kcfg_value_num(ext, ext_val, kver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7245) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7246) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7247) 			pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7248) 		} else if (ext->type == EXT_KCFG &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7249) 			   strncmp(ext->name, "CONFIG_", 7) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7250) 			need_config = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7251) 		} else if (ext->type == EXT_KSYM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7252) 			if (ext->ksym.type_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7253) 				need_vmlinux_btf = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7254) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7255) 				need_kallsyms = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7256) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7257) 			pr_warn("unrecognized extern '%s'\n", ext->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7258) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7259) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7260) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7261) 	if (need_config && extra_kconfig) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7262) 		err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7263) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7264) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7265) 		need_config = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7266) 		for (i = 0; i < obj->nr_extern; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7267) 			ext = &obj->externs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7268) 			if (ext->type == EXT_KCFG && !ext->is_set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7269) 				need_config = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7270) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7271) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7272) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7273) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7274) 	if (need_config) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7275) 		err = bpf_object__read_kconfig_file(obj, kcfg_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7276) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7277) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7278) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7279) 	if (need_kallsyms) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7280) 		err = bpf_object__read_kallsyms_file(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7281) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7282) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7283) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7284) 	if (need_vmlinux_btf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7285) 		err = bpf_object__resolve_ksyms_btf_id(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7286) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7287) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7288) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7289) 	for (i = 0; i < obj->nr_extern; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7290) 		ext = &obj->externs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7292) 		if (!ext->is_set && !ext->is_weak) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7293) 			pr_warn("extern %s (strong) not resolved\n", ext->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7294) 			return -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7295) 		} else if (!ext->is_set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7296) 			pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7297) 				 ext->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7298) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7299) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7301) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7303) 
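/*
 * Load the whole object into the kernel: probe basic BPF support, load
 * vmlinux BTF if needed, resolve externs, sanitize and load BTF, create maps,
 * perform relocations and finally load all programs.  On failure, maps
 * auto-pinned during this load are unpinned and anything already loaded is
 * torn down.  An object can be loaded only once.
 */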
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7304) int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7306) 	struct bpf_object *obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7307) 	int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7309) 	if (!attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7310) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7311) 	obj = attr->obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7312) 	if (!obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7313) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7315) 	if (obj->loaded) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7316) 		pr_warn("object '%s': load can't be attempted twice\n", obj->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7317) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7318) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7320) 	err = bpf_object__probe_loading(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7321) 	err = err ? : bpf_object__load_vmlinux_btf(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7322) 	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7323) 	err = err ? : bpf_object__sanitize_and_load_btf(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7324) 	err = err ? : bpf_object__sanitize_maps(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7325) 	err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7326) 	err = err ? : bpf_object__create_maps(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7327) 	err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7328) 	err = err ? : bpf_object__load_progs(obj, attr->log_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7330) 	btf__free(obj->btf_vmlinux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7331) 	obj->btf_vmlinux = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7333) 	obj->loaded = true; /* doesn't matter whether load succeeded or not */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7335) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7336) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7338) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7339) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7340) 	/* unpin any maps that were auto-pinned during load */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7341) 	for (i = 0; i < obj->nr_maps; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7342) 		if (obj->maps[i].pinned && !obj->maps[i].reused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7343) 			bpf_map__unpin(&obj->maps[i], NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7345) 	bpf_object__unload(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7346) 	pr_warn("failed to load object '%s'\n", obj->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7347) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7350) int bpf_object__load(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7352) 	struct bpf_object_load_attr attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7353) 		.obj = obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7354) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7356) 	return bpf_object__load_xattr(&attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7358) 
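/* Create the parent directory of a pin path (a single level, mode 0700). */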
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7359) static int make_parent_dir(const char *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7361) 	char *cp, errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7362) 	char *dname, *dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7363) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7365) 	dname = strdup(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7366) 	if (dname == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7367) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7369) 	dir = dirname(dname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7370) 	if (mkdir(dir, 0700) && errno != EEXIST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7371) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7373) 	free(dname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7374) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7375) 		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7376) 		pr_warn("failed to mkdir %s: %s\n", path, cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7377) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7378) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7380) 
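/* Check that the directory containing a pin path is on a BPF filesystem. */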
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7381) static int check_path(const char *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7383) 	char *cp, errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7384) 	struct statfs st_fs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7385) 	char *dname, *dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7386) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7388) 	if (path == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7389) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7391) 	dname = strdup(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7392) 	if (dname == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7393) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7395) 	dir = dirname(dname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7396) 	if (statfs(dir, &st_fs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7397) 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7398) 		pr_warn("failed to statfs %s: %s\n", dir, cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7399) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7400) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7401) 	free(dname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7403) 	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7404) 		pr_warn("specified path %s is not on BPF FS\n", path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7405) 		err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7406) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7408) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7410) 
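/*
 * Pin one program instance at the given path: create the parent directory,
 * check that it is on a BPF filesystem, then pin the instance's fd there.
 */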
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7411) int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7412) 			      int instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7414) 	char *cp, errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7415) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7417) 	err = make_parent_dir(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7418) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7419) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7421) 	err = check_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7422) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7423) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7425) 	if (prog == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7426) 		pr_warn("invalid program pointer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7427) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7428) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7430) 	if (instance < 0 || instance >= prog->instances.nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7431) 		pr_warn("invalid prog instance %d of prog %s (max %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7432) 			instance, prog->name, prog->instances.nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7433) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7434) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7436) 	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7437) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7438) 		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7439) 		pr_warn("failed to pin program: %s\n", cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7440) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7441) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7442) 	pr_debug("pinned program '%s'\n", path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7444) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7446) 
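/* Remove the pin of one program instance by unlinking its pin path. */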
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7447) int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7448) 				int instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7450) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7452) 	err = check_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7453) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7454) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7456) 	if (prog == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7457) 		pr_warn("invalid program pointer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7458) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7459) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7461) 	if (instance < 0 || instance >= prog->instances.nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7462) 		pr_warn("invalid prog instance %d of prog %s (max %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7463) 			instance, prog->name, prog->instances.nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7464) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7465) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7467) 	err = unlink(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7468) 	if (err != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7469) 		return -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7470) 	pr_debug("unpinned program '%s'\n", path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7472) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7474) 
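/*
 * Pin a program in the BPF filesystem.  A single-instance program is pinned
 * directly at the given path; with multiple instances each one is pinned at
 * path/<instance index>, and already-pinned instances are rolled back if a
 * later pin fails.
 */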
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7475) int bpf_program__pin(struct bpf_program *prog, const char *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7477) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7479) 	err = make_parent_dir(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7480) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7481) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7483) 	err = check_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7484) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7485) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7487) 	if (prog == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7488) 		pr_warn("invalid program pointer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7489) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7490) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7492) 	if (prog->instances.nr <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7493) 		pr_warn("no instances of prog %s to pin\n", prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7494) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7495) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7497) 	if (prog->instances.nr == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7498) 		/* don't create subdirs when pinning single instance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7499) 		return bpf_program__pin_instance(prog, path, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7500) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7502) 	for (i = 0; i < prog->instances.nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7503) 		char buf[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7504) 		int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7506) 		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7507) 		if (len < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7508) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7509) 			goto err_unpin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7510) 		} else if (len >= PATH_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7511) 			err = -ENAMETOOLONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7512) 			goto err_unpin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7513) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7515) 		err = bpf_program__pin_instance(prog, buf, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7516) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7517) 			goto err_unpin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7518) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7520) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7522) err_unpin:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7523) 	for (i = i - 1; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7524) 		char buf[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7525) 		int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7527) 		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7528) 		if (len < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7529) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7530) 		else if (len >= PATH_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7531) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7533) 		bpf_program__unpin_instance(prog, buf, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7534) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7536) 	rmdir(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7538) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7540) 
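/*
 * Undo bpf_program__pin(): a single-instance pin is removed directly, while
 * multi-instance pins remove each path/<instance index> and then the
 * directory itself.
 */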
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7541) int bpf_program__unpin(struct bpf_program *prog, const char *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7543) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7545) 	err = check_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7546) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7547) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7549) 	if (prog == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7550) 		pr_warn("invalid program pointer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7551) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7552) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7554) 	if (prog->instances.nr <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7555) 		pr_warn("no instances of prog %s to unpin\n", prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7556) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7557) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7559) 	if (prog->instances.nr == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7560) 		/* don't create subdirs when pinning single instance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7561) 		return bpf_program__unpin_instance(prog, path, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7562) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7564) 	for (i = 0; i < prog->instances.nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7565) 		char buf[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7566) 		int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7568) 		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7569) 		if (len < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7570) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7571) 		else if (len >= PATH_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7572) 			return -ENAMETOOLONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7574) 		err = bpf_program__unpin_instance(prog, buf, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7575) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7576) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7577) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7579) 	err = rmdir(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7580) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7581) 		return -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7583) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7584) }
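
/* Usage sketch (illustrative; program name and bpffs path are assumptions):
 * with a single instance the program is pinned at the given path itself;
 * with N instances it is pinned under "<path>/0" .. "<path>/<N-1>".
 *
 *	struct bpf_program *prog;
 *	int err;
 *
 *	prog = bpf_object__find_program_by_name(obj, "handle_tp");
 *	if (!prog)
 *		return -ENOENT;
 *	err = bpf_program__pin(prog, "/sys/fs/bpf/handle_tp");
 *	if (err)
 *		return err;
 *	...
 *	err = bpf_program__unpin(prog, "/sys/fs/bpf/handle_tp");
 */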
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7586) int bpf_map__pin(struct bpf_map *map, const char *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7588) 	char *cp, errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7589) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7591) 	if (map == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7592) 		pr_warn("invalid map pointer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7593) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7594) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7596) 	if (map->pin_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7597) 		if (path && strcmp(path, map->pin_path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7598) 			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7599) 				bpf_map__name(map), map->pin_path, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7600) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7601) 		} else if (map->pinned) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7602) 			pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7603) 				 bpf_map__name(map), map->pin_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7604) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7605) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7606) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7607) 		if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7608) 			pr_warn("missing a path to pin map '%s' at\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7609) 				bpf_map__name(map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7610) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7611) 		} else if (map->pinned) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7612) 			pr_warn("map '%s' already pinned\n", bpf_map__name(map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7613) 			return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7614) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7616) 		map->pin_path = strdup(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7617) 		if (!map->pin_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7618) 			err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7619) 			goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7620) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7621) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7623) 	err = make_parent_dir(map->pin_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7624) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7625) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7627) 	err = check_path(map->pin_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7628) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7629) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7631) 	if (bpf_obj_pin(map->fd, map->pin_path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7632) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7633) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7634) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7636) 	map->pinned = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7637) 	pr_debug("pinned map '%s'\n", map->pin_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7639) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7641) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7642) 	cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7643) 	pr_warn("failed to pin map: %s\n", cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7644) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7645) }
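
/* Usage sketch (illustrative; map name and path are assumptions): pin a map
 * after the object has been loaded. If the map already carries a pin_path
 * (e.g. set with bpf_map__set_pin_path()), NULL may be passed instead and
 * that path is used.
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *	int err;
 *
 *	if (!map)
 *		return -ENOENT;
 *	err = bpf_map__pin(map, "/sys/fs/bpf/my_map");
 *	if (err)
 *		fprintf(stderr, "failed to pin map: %d\n", err);
 */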
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7647) int bpf_map__unpin(struct bpf_map *map, const char *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7649) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7651) 	if (map == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7652) 		pr_warn("invalid map pointer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7653) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7654) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7656) 	if (map->pin_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7657) 		if (path && strcmp(path, map->pin_path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7658) 			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7659) 				bpf_map__name(map), map->pin_path, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7660) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7661) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7662) 		path = map->pin_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7663) 	} else if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7664) 		pr_warn("no path to unpin map '%s' from\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7665) 			bpf_map__name(map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7666) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7667) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7669) 	err = check_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7670) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7671) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7673) 	err = unlink(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7674) 	if (err != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7675) 		return -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7677) 	map->pinned = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7678) 	pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7680) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7683) int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7685) 	char *new = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7687) 	if (path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7688) 		new = strdup(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7689) 		if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7690) 			return -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7691) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7693) 	free(map->pin_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7694) 	map->pin_path = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7695) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7698) const char *bpf_map__get_pin_path(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7700) 	return map->pin_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7703) bool bpf_map__is_pinned(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7705) 	return map->pinned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7706) }
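
/* Usage sketch (illustrative map name and path): setting a pin path before
 * bpf_object__load() lets libbpf reuse a compatible map already pinned at
 * that location, or pin the freshly created one there.
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "counters");
 *	int err;
 *
 *	err = bpf_map__set_pin_path(map, "/sys/fs/bpf/counters");
 *	if (err)
 *		return err;
 *	err = bpf_object__load(obj);
 *	if (!err)
 *		printf("pinned: %d\n", bpf_map__is_pinned(map));
 */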
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7708) static void sanitize_pin_path(char *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7710) 	/* bpffs disallows periods in path names */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7711) 	while (*s) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7712) 		if (*s == '.')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7713) 			*s = '_';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7714) 		s++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7715) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7716) }
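
/* For instance, a map with an illustrative name like "prog_name.rodata"
 * pinned by name under "/sys/fs/bpf" ends up at "/sys/fs/bpf/prog_name_rodata":
 *
 *	char buf[] = "/sys/fs/bpf/prog_name.rodata";
 *
 *	sanitize_pin_path(buf);	// buf is now "/sys/fs/bpf/prog_name_rodata"
 */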
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7718) int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7720) 	struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7721) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7723) 	if (!obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7724) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7726) 	if (!obj->loaded) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7727) 		pr_warn("object not yet loaded; load it first\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7728) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7729) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7731) 	bpf_object__for_each_map(map, obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7732) 		char *pin_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7733) 		char buf[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7735) 		if (path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7736) 			int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7738) 			len = snprintf(buf, PATH_MAX, "%s/%s", path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7739) 				       bpf_map__name(map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7740) 			if (len < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7741) 				err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7742) 				goto err_unpin_maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7743) 			} else if (len >= PATH_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7744) 				err = -ENAMETOOLONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7745) 				goto err_unpin_maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7746) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7747) 			sanitize_pin_path(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7748) 			pin_path = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7749) 		} else if (!map->pin_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7750) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7751) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7753) 		err = bpf_map__pin(map, pin_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7754) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7755) 			goto err_unpin_maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7756) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7758) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7760) err_unpin_maps:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7761) 	while ((map = bpf_map__prev(map, obj))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7762) 		if (!map->pin_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7763) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7765) 		bpf_map__unpin(map, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7766) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7768) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7769) }
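
/* Usage sketch (path is illustrative): with a non-NULL path every map is
 * pinned at "<path>/<map name>" (sanitized as above); with a NULL path only
 * maps that already have a pin_path set are pinned, the rest are skipped.
 *
 *	err = bpf_object__pin_maps(obj, "/sys/fs/bpf/tc_prog");
 *	if (err)
 *		return err;
 */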
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7771) int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7773) 	struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7774) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7776) 	if (!obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7777) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7779) 	bpf_object__for_each_map(map, obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7780) 		char *pin_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7781) 		char buf[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7783) 		if (path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7784) 			int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7786) 			len = snprintf(buf, PATH_MAX, "%s/%s", path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7787) 				       bpf_map__name(map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7788) 			if (len < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7789) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7790) 			else if (len >= PATH_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7791) 				return -ENAMETOOLONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7792) 			sanitize_pin_path(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7793) 			pin_path = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7794) 		} else if (!map->pin_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7795) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7796) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7798) 		err = bpf_map__unpin(map, pin_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7799) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7800) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7801) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7803) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7806) int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7808) 	struct bpf_program *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7809) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7811) 	if (!obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7812) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7814) 	if (!obj->loaded) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7815) 		pr_warn("object not yet loaded; load it first\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7816) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7817) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7819) 	bpf_object__for_each_program(prog, obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7820) 		char buf[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7821) 		int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7823) 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7824) 			       prog->pin_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7825) 		if (len < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7826) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7827) 			goto err_unpin_programs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7828) 		} else if (len >= PATH_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7829) 			err = -ENAMETOOLONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7830) 			goto err_unpin_programs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7831) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7833) 		err = bpf_program__pin(prog, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7834) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7835) 			goto err_unpin_programs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7836) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7838) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7840) err_unpin_programs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7841) 	while ((prog = bpf_program__prev(prog, obj))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7842) 		char buf[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7843) 		int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7845) 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7846) 			       prog->pin_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7847) 		if (len < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7848) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7849) 		else if (len >= PATH_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7850) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7852) 		bpf_program__unpin(prog, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7853) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7855) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7858) int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7860) 	struct bpf_program *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7861) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7863) 	if (!obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7864) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7866) 	bpf_object__for_each_program(prog, obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7867) 		char buf[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7868) 		int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7870) 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7871) 			       prog->pin_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7872) 		if (len < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7873) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7874) 		else if (len >= PATH_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7875) 			return -ENAMETOOLONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7877) 		err = bpf_program__unpin(prog, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7878) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7879) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7880) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7882) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7885) int bpf_object__pin(struct bpf_object *obj, const char *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7887) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7889) 	err = bpf_object__pin_maps(obj, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7890) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7891) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7893) 	err = bpf_object__pin_programs(obj, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7894) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7895) 		bpf_object__unpin_maps(obj, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7896) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7897) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7899) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7900) }
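
/* Usage sketch (path is illustrative): pin every map and program of a loaded
 * object under one bpffs directory; maps are pinned first and rolled back if
 * pinning the programs fails, as implemented above.
 *
 *	err = bpf_object__load(obj);
 *	if (err)
 *		return err;
 *	err = bpf_object__pin(obj, "/sys/fs/bpf/my_obj");
 *	if (err)
 *		fprintf(stderr, "failed to pin object: %d\n", err);
 */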
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7902) static void bpf_map__destroy(struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7904) 	if (map->clear_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7905) 		map->clear_priv(map, map->priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7906) 	map->priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7907) 	map->clear_priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7909) 	if (map->inner_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7910) 		bpf_map__destroy(map->inner_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7911) 		zfree(&map->inner_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7912) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7914) 	zfree(&map->init_slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7915) 	map->init_slots_sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7917) 	if (map->mmaped) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7918) 		munmap(map->mmaped, bpf_map_mmap_sz(map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7919) 		map->mmaped = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7920) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7922) 	if (map->st_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7923) 		zfree(&map->st_ops->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7924) 		zfree(&map->st_ops->progs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7925) 		zfree(&map->st_ops->kern_func_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7926) 		zfree(&map->st_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7927) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7929) 	zfree(&map->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7930) 	zfree(&map->pin_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7932) 	if (map->fd >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7933) 		zclose(map->fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7936) void bpf_object__close(struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7938) 	size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7940) 	if (IS_ERR_OR_NULL(obj))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7941) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7943) 	if (obj->clear_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7944) 		obj->clear_priv(obj, obj->priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7946) 	bpf_object__elf_finish(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7947) 	bpf_object__unload(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7948) 	btf__free(obj->btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7949) 	btf_ext__free(obj->btf_ext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7951) 	for (i = 0; i < obj->nr_maps; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7952) 		bpf_map__destroy(&obj->maps[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7954) 	zfree(&obj->kconfig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7955) 	zfree(&obj->externs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7956) 	obj->nr_extern = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7958) 	zfree(&obj->maps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7959) 	obj->nr_maps = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7961) 	if (obj->programs && obj->nr_programs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7962) 		for (i = 0; i < obj->nr_programs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7963) 			bpf_program__exit(&obj->programs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7964) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7965) 	zfree(&obj->programs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7967) 	list_del(&obj->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7968) 	free(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7969) }
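
/* Usage sketch (file name and workload are illustrative): the typical object
 * lifecycle; bpf_object__close() is the single teardown call that unloads
 * programs, frees maps and BTF, and drops the object from the global list.
 *
 *	struct bpf_object *obj;
 *	long err;
 *
 *	obj = bpf_object__open_file("prog.bpf.o", NULL);
 *	err = libbpf_get_error(obj);
 *	if (err)
 *		return err;
 *	if (!bpf_object__load(obj))
 *		run_workload();		// illustrative user code
 *	bpf_object__close(obj);
 */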
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7971) struct bpf_object *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7972) bpf_object__next(struct bpf_object *prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7974) 	struct bpf_object *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7976) 	if (!prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7977) 		next = list_first_entry(&bpf_objects_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7978) 					struct bpf_object,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7979) 					list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7980) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7981) 		next = list_next_entry(prev, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7983) 	/* An empty list is detected here, so no check is needed on entry. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7984) 	if (&next->list == &bpf_objects_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7985) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7987) 	return next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7988) }
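
/* Usage sketch: walk every bpf_object currently open in the process; passing
 * NULL starts from the head of the global bpf_objects_list.
 *
 *	struct bpf_object *obj = NULL;
 *
 *	while ((obj = bpf_object__next(obj)) != NULL)
 *		printf("object: %s\n", bpf_object__name(obj));
 */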
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7990) const char *bpf_object__name(const struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7992) 	return obj ? obj->name : ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7995) unsigned int bpf_object__kversion(const struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7997) 	return obj ? obj->kern_version : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8000) struct btf *bpf_object__btf(const struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8002) 	return obj ? obj->btf : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8005) int bpf_object__btf_fd(const struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8007) 	return obj->btf ? btf__fd(obj->btf) : -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8010) int bpf_object__set_priv(struct bpf_object *obj, void *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8011) 			 bpf_object_clear_priv_t clear_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8013) 	if (obj->priv && obj->clear_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8014) 		obj->clear_priv(obj, obj->priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8016) 	obj->priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8017) 	obj->clear_priv = clear_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8018) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8021) void *bpf_object__priv(const struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8023) 	return obj ? obj->priv : ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8024) }
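
/* Usage sketch (the context struct is illustrative): attach caller-owned data
 * to an object; the clear_priv callback runs when the pointer is replaced or
 * when the object is closed (see bpf_object__close() above).
 *
 *	static void ctx_free(struct bpf_object *obj, void *data)
 *	{
 *		free(data);
 *	}
 *	...
 *	bpf_object__set_priv(obj, ctx, ctx_free);
 *	struct my_ctx *cur = bpf_object__priv(obj);
 */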
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8026) static struct bpf_program *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8027) __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8028) 		    bool forward)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8030) 	size_t nr_programs = obj->nr_programs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8031) 	ssize_t idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8033) 	if (!nr_programs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8034) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8036) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8037) 		/* Iter from the beginning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8038) 		return forward ? &obj->programs[0] :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8039) 			&obj->programs[nr_programs - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8041) 	if (p->obj != obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8042) 		pr_warn("error: program handle doesn't match object\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8043) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8044) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8046) 	idx = (p - obj->programs) + (forward ? 1 : -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8047) 	if (idx >= obj->nr_programs || idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8048) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8049) 	return &obj->programs[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8052) struct bpf_program *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8053) bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8055) 	struct bpf_program *prog = prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8057) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8058) 		prog = __bpf_program__iter(prog, obj, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8059) 	} while (prog && prog_is_subprog(obj, prog));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8061) 	return prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8064) struct bpf_program *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8065) bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8067) 	struct bpf_program *prog = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8069) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8070) 		prog = __bpf_program__iter(prog, obj, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8071) 	} while (prog && prog_is_subprog(obj, prog));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8073) 	return prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8074) }
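
/* Usage sketch: iterate the entry-point programs of an object (sub-programs
 * are skipped); bpf_object__for_each_program() from libbpf.h wraps
 * bpf_program__next() above.
 *
 *	struct bpf_program *prog;
 *
 *	bpf_object__for_each_program(prog, obj)
 *		printf("%s (section %s)\n", bpf_program__name(prog),
 *		       bpf_program__section_name(prog));
 */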
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8076) int bpf_program__set_priv(struct bpf_program *prog, void *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8077) 			  bpf_program_clear_priv_t clear_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8079) 	if (prog->priv && prog->clear_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8080) 		prog->clear_priv(prog, prog->priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8082) 	prog->priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8083) 	prog->clear_priv = clear_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8084) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8087) void *bpf_program__priv(const struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8089) 	return prog ? prog->priv : ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8092) void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8094) 	prog->prog_ifindex = ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8097) const char *bpf_program__name(const struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8098) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8099) 	return prog->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8102) const char *bpf_program__section_name(const struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8104) 	return prog->sec_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8107) const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8109) 	const char *title;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8111) 	title = prog->sec_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8112) 	if (needs_copy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8113) 		title = strdup(title);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8114) 		if (!title) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8115) 			pr_warn("failed to strdup program title\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8116) 			return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8117) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8118) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8120) 	return title;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8123) bool bpf_program__autoload(const struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8125) 	return prog->load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8128) int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8130) 	if (prog->obj->loaded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8131) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8133) 	prog->load = autoload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8134) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8135) }
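
/* Usage sketch (program name is illustrative): disable loading of one program
 * after open but before bpf_object__load(); once the object is loaded the
 * call fails with -EINVAL, as checked above.
 *
 *	struct bpf_program *prog;
 *
 *	prog = bpf_object__find_program_by_name(obj, "optional_probe");
 *	if (prog)
 *		bpf_program__set_autoload(prog, false);
 *	err = bpf_object__load(obj);
 */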
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8137) int bpf_program__fd(const struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8139) 	return bpf_program__nth_fd(prog, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8142) size_t bpf_program__size(const struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8144) 	return prog->insns_cnt * BPF_INSN_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8147) int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8148) 			  bpf_program_prep_t prep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8150) 	int *instances_fds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8152) 	if (nr_instances <= 0 || !prep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8153) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8155) 	if (prog->instances.nr > 0 || prog->instances.fds) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8156) 		pr_warn("Can't set pre-processor after loading\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8157) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8158) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8160) 	instances_fds = malloc(sizeof(int) * nr_instances);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8161) 	if (!instances_fds) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8162) 		pr_warn("failed to allocate memory for instance fds\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8163) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8164) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8166) 	/* fill all fds with -1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8167) 	memset(instances_fds, -1, sizeof(int) * nr_instances);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8169) 	prog->instances.nr = nr_instances;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8170) 	prog->instances.fds = instances_fds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8171) 	prog->preprocessor = prep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8172) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8175) int bpf_program__nth_fd(const struct bpf_program *prog, int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8177) 	int fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8179) 	if (!prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8180) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8182) 	if (n >= prog->instances.nr || n < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8183) 		pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8184) 			n, prog->name, prog->instances.nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8185) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8186) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8188) 	fd = prog->instances.fds[n];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8189) 	if (fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8190) 		pr_warn("%dth instance of program '%s' is invalid\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8191) 			n, prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8192) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8193) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8195) 	return fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8196) }
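
/* Usage sketch, assuming the bpf_program_prep_t callback and struct
 * bpf_prog_prep_result declared in libbpf.h: load two instances of one
 * program and fetch each FD. This callback reuses the original instructions
 * unchanged; a real preprocessor would rewrite them per instance.
 *
 *	static int prep_fn(struct bpf_program *prog, int n,
 *			   struct bpf_insn *insns, int insns_cnt,
 *			   struct bpf_prog_prep_result *res)
 *	{
 *		res->new_insn_ptr = insns;
 *		res->new_insn_cnt = insns_cnt;
 *		return 0;
 *	}
 *	...
 *	bpf_program__set_prep(prog, 2, prep_fn);
 *	bpf_object__load(obj);
 *	fd0 = bpf_program__nth_fd(prog, 0);
 *	fd1 = bpf_program__nth_fd(prog, 1);
 */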
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8198) enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8200) 	return prog->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8203) void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8205) 	prog->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8208) static bool bpf_program__is_type(const struct bpf_program *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8209) 				 enum bpf_prog_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8211) 	return prog ? (prog->type == type) : false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8214) #define BPF_PROG_TYPE_FNS(NAME, TYPE)				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8215) int bpf_program__set_##NAME(struct bpf_program *prog)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8216) {								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8217) 	if (!prog)						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8218) 		return -EINVAL;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8219) 	bpf_program__set_type(prog, TYPE);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8220) 	return 0;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8221) }								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8222) 								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8223) bool bpf_program__is_##NAME(const struct bpf_program *prog)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8224) {								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8225) 	return bpf_program__is_type(prog, TYPE);		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8226) }								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8228) BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8229) BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8230) BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8231) BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8232) BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8233) BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8234) BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8235) BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8236) BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8237) BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8238) BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8239) BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8240) BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
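
/* Each BPF_PROG_TYPE_FNS() line above expands into a setter/predicate pair,
 * e.g. bpf_program__set_xdp() and bpf_program__is_xdp(), usually called
 * before load to override the type guessed from the section name:
 *
 *	// equivalent to bpf_program__set_type(prog, BPF_PROG_TYPE_XDP)
 *	if (!bpf_program__is_xdp(prog))
 *		bpf_program__set_xdp(prog);
 */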
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8242) enum bpf_attach_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8243) bpf_program__get_expected_attach_type(struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8245) 	return prog->expected_attach_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8248) void bpf_program__set_expected_attach_type(struct bpf_program *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8249) 					   enum bpf_attach_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8251) 	prog->expected_attach_type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8254) #define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional,	    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8255) 			  attachable, attach_btf)			    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8256) 	{								    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8257) 		.sec = string,						    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8258) 		.len = sizeof(string) - 1,				    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8259) 		.prog_type = ptype,					    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8260) 		.expected_attach_type = eatype,				    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8261) 		.is_exp_attach_type_optional = eatype_optional,		    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8262) 		.is_attachable = attachable,				    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8263) 		.is_attach_btf = attach_btf,				    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8264) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8266) /* Programs that can NOT be attached. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8267) #define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8269) /* Programs that can be attached. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8270) #define BPF_APROG_SEC(string, ptype, atype) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8271) 	BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8273) /* Programs that must specify expected attach type at load time. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8274) #define BPF_EAPROG_SEC(string, ptype, eatype) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8275) 	BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8277) /* Programs that use BTF to identify attach point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8278) #define BPF_PROG_BTF(string, ptype, eatype) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8279) 	BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8281) /* Programs that can be attached but attach type can't be identified by section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8282)  * name. Kept for backward compatibility.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8283)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8284) #define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8286) #define SEC_DEF(sec_pfx, ptype, ...) {					    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8287) 	.sec = sec_pfx,							    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8288) 	.len = sizeof(sec_pfx) - 1,					    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8289) 	.prog_type = BPF_PROG_TYPE_##ptype,				    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8290) 	__VA_ARGS__							    \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8291) }
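
/* These macros build the section_defs[] table below, which maps ELF section
 * name prefixes to program types, expected attach types and attach functions.
 * For instance (program and context names are illustrative), a BPF source
 * annotated as
 *
 *	SEC("tracepoint/syscalls/sys_enter_openat")
 *	int handle_openat(struct trace_event_raw_sys_enter *ctx) { ... }
 *
 * matches the "tracepoint/" SEC_DEF() entry, is loaded as
 * BPF_PROG_TYPE_TRACEPOINT and attached via attach_tp().
 */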
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8293) static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8294) 				      struct bpf_program *prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8295) static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8296) 				  struct bpf_program *prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8297) static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8298) 				      struct bpf_program *prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8299) static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8300) 				     struct bpf_program *prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8301) static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8302) 				   struct bpf_program *prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8303) static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8304) 				    struct bpf_program *prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8306) static const struct bpf_sec_def section_defs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8307) 	BPF_PROG_SEC("socket",			BPF_PROG_TYPE_SOCKET_FILTER),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8308) 	BPF_PROG_SEC("sk_reuseport",		BPF_PROG_TYPE_SK_REUSEPORT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8309) 	SEC_DEF("kprobe/", KPROBE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8310) 		.attach_fn = attach_kprobe),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8311) 	BPF_PROG_SEC("uprobe/",			BPF_PROG_TYPE_KPROBE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8312) 	SEC_DEF("kretprobe/", KPROBE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8313) 		.attach_fn = attach_kprobe),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8314) 	BPF_PROG_SEC("uretprobe/",		BPF_PROG_TYPE_KPROBE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8315) 	BPF_PROG_SEC("classifier",		BPF_PROG_TYPE_SCHED_CLS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8316) 	BPF_PROG_SEC("action",			BPF_PROG_TYPE_SCHED_ACT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8317) 	SEC_DEF("tracepoint/", TRACEPOINT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8318) 		.attach_fn = attach_tp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8319) 	SEC_DEF("tp/", TRACEPOINT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8320) 		.attach_fn = attach_tp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8321) 	SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8322) 		.attach_fn = attach_raw_tp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8323) 	SEC_DEF("raw_tp/", RAW_TRACEPOINT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8324) 		.attach_fn = attach_raw_tp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8325) 	SEC_DEF("tp_btf/", TRACING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8326) 		.expected_attach_type = BPF_TRACE_RAW_TP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8327) 		.is_attach_btf = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8328) 		.attach_fn = attach_trace),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8329) 	SEC_DEF("fentry/", TRACING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8330) 		.expected_attach_type = BPF_TRACE_FENTRY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8331) 		.is_attach_btf = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8332) 		.attach_fn = attach_trace),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8333) 	SEC_DEF("fmod_ret/", TRACING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8334) 		.expected_attach_type = BPF_MODIFY_RETURN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8335) 		.is_attach_btf = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8336) 		.attach_fn = attach_trace),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8337) 	SEC_DEF("fexit/", TRACING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8338) 		.expected_attach_type = BPF_TRACE_FEXIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8339) 		.is_attach_btf = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8340) 		.attach_fn = attach_trace),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8341) 	SEC_DEF("fentry.s/", TRACING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8342) 		.expected_attach_type = BPF_TRACE_FENTRY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8343) 		.is_attach_btf = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8344) 		.is_sleepable = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8345) 		.attach_fn = attach_trace),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8346) 	SEC_DEF("fmod_ret.s/", TRACING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8347) 		.expected_attach_type = BPF_MODIFY_RETURN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8348) 		.is_attach_btf = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8349) 		.is_sleepable = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8350) 		.attach_fn = attach_trace),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8351) 	SEC_DEF("fexit.s/", TRACING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8352) 		.expected_attach_type = BPF_TRACE_FEXIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8353) 		.is_attach_btf = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8354) 		.is_sleepable = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8355) 		.attach_fn = attach_trace),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8356) 	SEC_DEF("freplace/", EXT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8357) 		.is_attach_btf = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8358) 		.attach_fn = attach_trace),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8359) 	SEC_DEF("lsm/", LSM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8360) 		.is_attach_btf = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8361) 		.expected_attach_type = BPF_LSM_MAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8362) 		.attach_fn = attach_lsm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8363) 	SEC_DEF("lsm.s/", LSM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8364) 		.is_attach_btf = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8365) 		.is_sleepable = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8366) 		.expected_attach_type = BPF_LSM_MAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8367) 		.attach_fn = attach_lsm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8368) 	SEC_DEF("iter/", TRACING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8369) 		.expected_attach_type = BPF_TRACE_ITER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8370) 		.is_attach_btf = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8371) 		.attach_fn = attach_iter),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8372) 	BPF_EAPROG_SEC("xdp_devmap/",		BPF_PROG_TYPE_XDP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8373) 						BPF_XDP_DEVMAP),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8374) 	BPF_EAPROG_SEC("xdp_cpumap/",		BPF_PROG_TYPE_XDP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8375) 						BPF_XDP_CPUMAP),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8376) 	BPF_APROG_SEC("xdp",			BPF_PROG_TYPE_XDP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8377) 						BPF_XDP),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8378) 	BPF_PROG_SEC("perf_event",		BPF_PROG_TYPE_PERF_EVENT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8379) 	BPF_PROG_SEC("lwt_in",			BPF_PROG_TYPE_LWT_IN),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8380) 	BPF_PROG_SEC("lwt_out",			BPF_PROG_TYPE_LWT_OUT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8381) 	BPF_PROG_SEC("lwt_xmit",		BPF_PROG_TYPE_LWT_XMIT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8382) 	BPF_PROG_SEC("lwt_seg6local",		BPF_PROG_TYPE_LWT_SEG6LOCAL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8383) 	BPF_APROG_SEC("cgroup_skb/ingress",	BPF_PROG_TYPE_CGROUP_SKB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8384) 						BPF_CGROUP_INET_INGRESS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8385) 	BPF_APROG_SEC("cgroup_skb/egress",	BPF_PROG_TYPE_CGROUP_SKB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8386) 						BPF_CGROUP_INET_EGRESS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8387) 	BPF_APROG_COMPAT("cgroup/skb",		BPF_PROG_TYPE_CGROUP_SKB),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8388) 	BPF_EAPROG_SEC("cgroup/sock_create",	BPF_PROG_TYPE_CGROUP_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8389) 						BPF_CGROUP_INET_SOCK_CREATE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8390) 	BPF_EAPROG_SEC("cgroup/sock_release",	BPF_PROG_TYPE_CGROUP_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8391) 						BPF_CGROUP_INET_SOCK_RELEASE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8392) 	BPF_APROG_SEC("cgroup/sock",		BPF_PROG_TYPE_CGROUP_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8393) 						BPF_CGROUP_INET_SOCK_CREATE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8394) 	BPF_EAPROG_SEC("cgroup/post_bind4",	BPF_PROG_TYPE_CGROUP_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8395) 						BPF_CGROUP_INET4_POST_BIND),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8396) 	BPF_EAPROG_SEC("cgroup/post_bind6",	BPF_PROG_TYPE_CGROUP_SOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8397) 						BPF_CGROUP_INET6_POST_BIND),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8398) 	BPF_APROG_SEC("cgroup/dev",		BPF_PROG_TYPE_CGROUP_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8399) 						BPF_CGROUP_DEVICE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8400) 	BPF_APROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8401) 						BPF_CGROUP_SOCK_OPS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8402) 	BPF_APROG_SEC("sk_skb/stream_parser",	BPF_PROG_TYPE_SK_SKB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8403) 						BPF_SK_SKB_STREAM_PARSER),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8404) 	BPF_APROG_SEC("sk_skb/stream_verdict",	BPF_PROG_TYPE_SK_SKB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8405) 						BPF_SK_SKB_STREAM_VERDICT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8406) 	BPF_APROG_COMPAT("sk_skb",		BPF_PROG_TYPE_SK_SKB),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8407) 	BPF_APROG_SEC("sk_msg",			BPF_PROG_TYPE_SK_MSG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8408) 						BPF_SK_MSG_VERDICT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8409) 	BPF_APROG_SEC("lirc_mode2",		BPF_PROG_TYPE_LIRC_MODE2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8410) 						BPF_LIRC_MODE2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8411) 	BPF_APROG_SEC("flow_dissector",		BPF_PROG_TYPE_FLOW_DISSECTOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8412) 						BPF_FLOW_DISSECTOR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8413) 	BPF_EAPROG_SEC("cgroup/bind4",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8414) 						BPF_CGROUP_INET4_BIND),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8415) 	BPF_EAPROG_SEC("cgroup/bind6",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8416) 						BPF_CGROUP_INET6_BIND),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8417) 	BPF_EAPROG_SEC("cgroup/connect4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8418) 						BPF_CGROUP_INET4_CONNECT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8419) 	BPF_EAPROG_SEC("cgroup/connect6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8420) 						BPF_CGROUP_INET6_CONNECT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8421) 	BPF_EAPROG_SEC("cgroup/sendmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8422) 						BPF_CGROUP_UDP4_SENDMSG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8423) 	BPF_EAPROG_SEC("cgroup/sendmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8424) 						BPF_CGROUP_UDP6_SENDMSG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8425) 	BPF_EAPROG_SEC("cgroup/recvmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8426) 						BPF_CGROUP_UDP4_RECVMSG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8427) 	BPF_EAPROG_SEC("cgroup/recvmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8428) 						BPF_CGROUP_UDP6_RECVMSG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8429) 	BPF_EAPROG_SEC("cgroup/getpeername4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8430) 						BPF_CGROUP_INET4_GETPEERNAME),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8431) 	BPF_EAPROG_SEC("cgroup/getpeername6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8432) 						BPF_CGROUP_INET6_GETPEERNAME),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8433) 	BPF_EAPROG_SEC("cgroup/getsockname4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8434) 						BPF_CGROUP_INET4_GETSOCKNAME),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8435) 	BPF_EAPROG_SEC("cgroup/getsockname6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8436) 						BPF_CGROUP_INET6_GETSOCKNAME),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8437) 	BPF_EAPROG_SEC("cgroup/sysctl",		BPF_PROG_TYPE_CGROUP_SYSCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8438) 						BPF_CGROUP_SYSCTL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8439) 	BPF_EAPROG_SEC("cgroup/getsockopt",	BPF_PROG_TYPE_CGROUP_SOCKOPT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8440) 						BPF_CGROUP_GETSOCKOPT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8441) 	BPF_EAPROG_SEC("cgroup/setsockopt",	BPF_PROG_TYPE_CGROUP_SOCKOPT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8442) 						BPF_CGROUP_SETSOCKOPT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8443) 	BPF_PROG_SEC("struct_ops",		BPF_PROG_TYPE_STRUCT_OPS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8444) 	BPF_EAPROG_SEC("sk_lookup/",		BPF_PROG_TYPE_SK_LOOKUP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8445) 						BPF_SK_LOOKUP),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8446) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8448) #undef BPF_PROG_SEC_IMPL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8449) #undef BPF_PROG_SEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8450) #undef BPF_APROG_SEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8451) #undef BPF_EAPROG_SEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8452) #undef BPF_APROG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8453) #undef SEC_DEF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8455) #define MAX_TYPE_NAME_SIZE 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8457) static const struct bpf_sec_def *find_sec_def(const char *sec_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8459) 	int i, n = ARRAY_SIZE(section_defs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8461) 	for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8462) 		if (strncmp(sec_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8463) 			    section_defs[i].sec, section_defs[i].len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8464) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8465) 		return &section_defs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8466) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8467) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8468) }
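
/*
 * A rough usage sketch (not part of libbpf itself, names are illustrative):
 * find_sec_def() does a prefix match of an ELF section name against the
 * section_defs[] table above. A BPF program annotated in its source roughly
 * like
 *
 *	SEC("fexit/do_unlinkat")
 *	int BPF_PROG(trace_unlink_exit, int dfd, struct filename *name, long ret)
 *	{
 *		return 0;
 *	}
 *
 * would be matched by the "fexit/" entry, i.e. program type
 * BPF_PROG_TYPE_TRACING with expected_attach_type BPF_TRACE_FEXIT. The
 * program name and traced kernel function are hypothetical examples.
 */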
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8470) static char *libbpf_get_type_names(bool attach_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8472) 	int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8473) 	char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8475) 	buf = malloc(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8476) 	if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8477) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8479) 	buf[0] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8480) 	/* Forge string buf with all available names */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8481) 	for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8482) 		if (attach_type && !section_defs[i].is_attachable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8483) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8485) 		if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8486) 			free(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8487) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8488) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8489) 		strcat(buf, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8490) 		strcat(buf, section_defs[i].sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8491) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8493) 	return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8496) int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8497) 			     enum bpf_attach_type *expected_attach_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8499) 	const struct bpf_sec_def *sec_def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8500) 	char *type_names;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8502) 	if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8503) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8505) 	sec_def = find_sec_def(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8506) 	if (sec_def) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8507) 		*prog_type = sec_def->prog_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8508) 		*expected_attach_type = sec_def->expected_attach_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8509) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8512) 	pr_debug("failed to guess program type from ELF section '%s'\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8513) 	type_names = libbpf_get_type_names(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8514) 	if (type_names != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8515) 		pr_debug("supported section(type) names are:%s\n", type_names);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8516) 		free(type_names);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8517) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8519) 	return -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8520) }
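
/*
 * A hedged usage sketch (assumed caller code, not taken from this file): a
 * caller resolving a section name through the public API above might do
 *
 *	enum bpf_prog_type prog_type;
 *	enum bpf_attach_type attach_type;
 *	int err;
 *
 *	err = libbpf_prog_type_by_name("cgroup/connect4", &prog_type,
 *				       &attach_type);
 *
 * which, per the section_defs[] table, yields BPF_PROG_TYPE_CGROUP_SOCK_ADDR
 * and BPF_CGROUP_INET4_CONNECT on success. Only the section name is taken
 * from the table above; the surrounding variables are hypothetical.
 */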
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8522) static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8523) 						     size_t offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8525) 	struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8526) 	size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8528) 	for (i = 0; i < obj->nr_maps; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8529) 		map = &obj->maps[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8530) 		if (!bpf_map__is_struct_ops(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8531) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8532) 		if (map->sec_offset <= offset &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8533) 		    offset - map->sec_offset < map->def.value_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8534) 			return map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8535) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8537) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8540) /* Collect the reloc from ELF and populate the st_ops->progs[] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8541) static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8542) 					    GElf_Shdr *shdr, Elf_Data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8544) 	const struct btf_member *member;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8545) 	struct bpf_struct_ops *st_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8546) 	struct bpf_program *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8547) 	unsigned int shdr_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8548) 	const struct btf *btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8549) 	struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8550) 	Elf_Data *symbols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8551) 	unsigned int moff, insn_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8552) 	const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8553) 	__u32 member_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8554) 	GElf_Sym sym;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8555) 	GElf_Rel rel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8556) 	int i, nrels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8558) 	symbols = obj->efile.symbols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8559) 	btf = obj->btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8560) 	nrels = shdr->sh_size / shdr->sh_entsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8561) 	for (i = 0; i < nrels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8562) 		if (!gelf_getrel(data, i, &rel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8563) 			pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8564) 			return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8565) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8567) 		if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8568) 			pr_warn("struct_ops reloc: symbol %zx not found\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8569) 				(size_t)GELF_R_SYM(rel.r_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8570) 			return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8571) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8573) 		name = elf_sym_str(obj, sym.st_name) ?: "<?>";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8574) 		map = find_struct_ops_map_by_offset(obj, rel.r_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8575) 		if (!map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8576) 			pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8577) 				(size_t)rel.r_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8578) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8579) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8581) 		moff = rel.r_offset - map->sec_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8582) 		shdr_idx = sym.st_shndx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8583) 		st_ops = map->st_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8584) 		pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8585) 			 map->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8586) 			 (long long)(rel.r_info >> 32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8587) 			 (long long)sym.st_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8588) 			 shdr_idx, (size_t)rel.r_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8589) 			 map->sec_offset, sym.st_name, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8591) 		if (shdr_idx >= SHN_LORESERVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8592) 			pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8593) 				map->name, (size_t)rel.r_offset, shdr_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8594) 			return -LIBBPF_ERRNO__RELOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8595) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8596) 		if (sym.st_value % BPF_INSN_SZ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8597) 			pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8598) 				map->name, (unsigned long long)sym.st_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8599) 			return -LIBBPF_ERRNO__FORMAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8600) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8601) 		insn_idx = sym.st_value / BPF_INSN_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8603) 		member = find_member_by_offset(st_ops->type, moff * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8604) 		if (!member) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8605) 			pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8606) 				map->name, moff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8607) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8608) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8609) 		member_idx = member - btf_members(st_ops->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8610) 		name = btf__name_by_offset(btf, member->name_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8612) 		if (!resolve_func_ptr(btf, member->type, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8613) 			pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8614) 				map->name, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8615) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8616) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8618) 		prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8619) 		if (!prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8620) 			pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8621) 				map->name, shdr_idx, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8622) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8623) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8625) 		if (prog->type == BPF_PROG_TYPE_UNSPEC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8626) 			const struct bpf_sec_def *sec_def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8628) 			sec_def = find_sec_def(prog->sec_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8629) 			if (sec_def &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8630) 			    sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8631) 				/* for pr_warn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8632) 				prog->type = sec_def->prog_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8633) 				goto invalid_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8634) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8636) 			prog->type = BPF_PROG_TYPE_STRUCT_OPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8637) 			prog->attach_btf_id = st_ops->type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8638) 			prog->expected_attach_type = member_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8639) 		} else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8640) 			   prog->attach_btf_id != st_ops->type_id ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8641) 			   prog->expected_attach_type != member_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8642) 			goto invalid_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8643) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8644) 		st_ops->progs[member_idx] = prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8645) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8647) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8649) invalid_prog:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8650) 	pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8651) 		map->name, prog->name, prog->sec_name, prog->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8652) 		prog->attach_btf_id, prog->expected_attach_type, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8653) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8656) #define BTF_TRACE_PREFIX "btf_trace_"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8657) #define BTF_LSM_PREFIX "bpf_lsm_"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8658) #define BTF_ITER_PREFIX "bpf_iter_"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8659) #define BTF_MAX_NAME_SIZE 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8661) static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8662) 				   const char *name, __u32 kind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8664) 	char btf_type_name[BTF_MAX_NAME_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8665) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8667) 	ret = snprintf(btf_type_name, sizeof(btf_type_name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8668) 		       "%s%s", prefix, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8669) 	/* snprintf returns the number of characters written excluding the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8670) 	 * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8671) 	 * indicates truncation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8672) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8673) 	if (ret < 0 || ret >= sizeof(btf_type_name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8674) 		return -ENAMETOOLONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8675) 	return btf__find_by_name_kind(btf, btf_type_name, kind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8678) static inline int __find_vmlinux_btf_id(struct btf *btf, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8679) 					enum bpf_attach_type attach_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8681) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8683) 	if (attach_type == BPF_TRACE_RAW_TP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8684) 		err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8685) 					      BTF_KIND_TYPEDEF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8686) 	else if (attach_type == BPF_LSM_MAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8687) 		err = find_btf_by_prefix_kind(btf, BTF_LSM_PREFIX, name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8688) 					      BTF_KIND_FUNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8689) 	else if (attach_type == BPF_TRACE_ITER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8690) 		err = find_btf_by_prefix_kind(btf, BTF_ITER_PREFIX, name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8691) 					      BTF_KIND_FUNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8692) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8693) 		err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8695) 	if (err <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8696) 		pr_warn("%s is not found in vmlinux BTF\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8698) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8701) int libbpf_find_vmlinux_btf_id(const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8702) 			       enum bpf_attach_type attach_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8704) 	struct btf *btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8705) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8707) 	btf = libbpf_find_kernel_btf();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8708) 	if (IS_ERR(btf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8709) 		pr_warn("vmlinux BTF is not found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8710) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8711) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8713) 	err = __find_vmlinux_btf_id(btf, name, attach_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8714) 	btf__free(btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8715) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8718) static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8720) 	struct bpf_prog_info_linear *info_linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8721) 	struct bpf_prog_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8722) 	struct btf *btf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8723) 	int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8725) 	info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8726) 	if (IS_ERR_OR_NULL(info_linear)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8727) 		pr_warn("failed get_prog_info_linear for FD %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8728) 			attach_prog_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8729) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8730) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8731) 	info = &info_linear->info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8732) 	if (!info->btf_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8733) 		pr_warn("The target program doesn't have BTF\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8734) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8735) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8736) 	if (btf__get_from_id(info->btf_id, &btf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8737) 		pr_warn("Failed to get BTF of the program\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8738) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8739) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8740) 	err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8741) 	btf__free(btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8742) 	if (err <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8743) 		pr_warn("%s is not found in prog's BTF\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8744) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8745) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8746) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8747) 	free(info_linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8748) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8751) static int libbpf_find_attach_btf_id(struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8753) 	enum bpf_attach_type attach_type = prog->expected_attach_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8754) 	__u32 attach_prog_fd = prog->attach_prog_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8755) 	const char *name = prog->sec_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8756) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8758) 	if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8759) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8761) 	for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8762) 		if (!section_defs[i].is_attach_btf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8763) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8764) 		if (strncmp(name, section_defs[i].sec, section_defs[i].len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8765) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8766) 		if (attach_prog_fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8767) 			err = libbpf_find_prog_btf_id(name + section_defs[i].len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8768) 						      attach_prog_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8769) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8770) 			err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8771) 						    name + section_defs[i].len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8772) 						    attach_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8773) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8774) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8775) 	pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8776) 	return -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8779) int libbpf_attach_type_by_name(const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8780) 			       enum bpf_attach_type *attach_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8782) 	char *type_names;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8783) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8785) 	if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8786) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8788) 	for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8789) 		if (strncmp(name, section_defs[i].sec, section_defs[i].len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8790) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8791) 		if (!section_defs[i].is_attachable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8792) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8793) 		*attach_type = section_defs[i].expected_attach_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8794) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8795) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8796) 	pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8797) 	type_names = libbpf_get_type_names(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8798) 	if (type_names != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8799) 		pr_debug("attachable section(type) names are:%s\n", type_names);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8800) 		free(type_names);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8801) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8803) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8806) int bpf_map__fd(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8808) 	return map ? map->fd : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8811) const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8813) 	return map ? &map->def : ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8816) const char *bpf_map__name(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8818) 	return map ? map->name : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8821) enum bpf_map_type bpf_map__type(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8823) 	return map->def.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8826) int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8828) 	if (map->fd >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8829) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8830) 	map->def.type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8831) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8834) __u32 bpf_map__map_flags(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8836) 	return map->def.map_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8839) int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8841) 	if (map->fd >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8842) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8843) 	map->def.map_flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8844) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8847) __u32 bpf_map__numa_node(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8849) 	return map->numa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8852) int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8854) 	if (map->fd >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8855) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8856) 	map->numa_node = numa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8857) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8860) __u32 bpf_map__key_size(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8862) 	return map->def.key_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8865) int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8867) 	if (map->fd >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8868) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8869) 	map->def.key_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8870) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8873) __u32 bpf_map__value_size(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8875) 	return map->def.value_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8878) int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8880) 	if (map->fd >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8881) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8882) 	map->def.value_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8883) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8886) __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8888) 	return map ? map->btf_key_type_id : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8891) __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8893) 	return map ? map->btf_value_type_id : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8896) int bpf_map__set_priv(struct bpf_map *map, void *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8897) 		     bpf_map_clear_priv_t clear_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8899) 	if (!map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8900) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8902) 	if (map->priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8903) 		if (map->clear_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8904) 			map->clear_priv(map, map->priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8905) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8907) 	map->priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8908) 	map->clear_priv = clear_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8909) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8912) void *bpf_map__priv(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8914) 	return map ? map->priv : ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8917) int bpf_map__set_initial_value(struct bpf_map *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8918) 			       const void *data, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8920) 	if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8921) 	    size != map->def.value_size || map->fd >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8922) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8924) 	memcpy(map->mmaped, data, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8925) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8928) bool bpf_map__is_offload_neutral(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8930) 	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8933) bool bpf_map__is_internal(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8935) 	return map->libbpf_type != LIBBPF_MAP_UNSPEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8938) __u32 bpf_map__ifindex(const struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8940) 	return map->map_ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8943) int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8945) 	if (map->fd >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8946) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8947) 	map->map_ifindex = ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8948) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8951) int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8953) 	if (!bpf_map_type__is_map_in_map(map->def.type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8954) 		pr_warn("error: unsupported map type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8955) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8956) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8957) 	if (map->inner_map_fd != -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8958) 		pr_warn("error: inner_map_fd already specified\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8959) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8960) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8961) 	map->inner_map_fd = fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8962) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8965) static struct bpf_map *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8966) __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8968) 	ssize_t idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8969) 	struct bpf_map *s, *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8971) 	if (!obj || !obj->maps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8972) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8974) 	s = obj->maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8975) 	e = obj->maps + obj->nr_maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8977) 	if ((m < s) || (m >= e)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8978) 		pr_warn("error in %s: map handler doesn't belong to object\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8979) 			 __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8980) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8981) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8983) 	idx = (m - obj->maps) + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8984) 	if (idx >= obj->nr_maps || idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8985) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8986) 	return &obj->maps[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8989) struct bpf_map *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8990) bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8992) 	if (prev == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8993) 		return obj->maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8995) 	return __bpf_map__iter(prev, obj, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8998) struct bpf_map *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8999) bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9001) 	if (next == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9002) 		if (!obj->nr_maps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9003) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9004) 		return obj->maps + obj->nr_maps - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9005) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9007) 	return __bpf_map__iter(next, obj, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9010) struct bpf_map *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9011) bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9013) 	struct bpf_map *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9015) 	bpf_object__for_each_map(pos, obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9016) 		if (pos->name && !strcmp(pos->name, name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9017) 			return pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9018) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9019) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9022) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9023) bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9025) 	return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9028) struct bpf_map *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9029) bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9031) 	return ERR_PTR(-ENOTSUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9034) long libbpf_get_error(const void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9036) 	return PTR_ERR_OR_ZERO(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9039) int bpf_prog_load(const char *file, enum bpf_prog_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9040) 		  struct bpf_object **pobj, int *prog_fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9042) 	struct bpf_prog_load_attr attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9044) 	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9045) 	attr.file = file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9046) 	attr.prog_type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9047) 	attr.expected_attach_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9049) 	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9052) int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9053) 			struct bpf_object **pobj, int *prog_fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9055) 	struct bpf_object_open_attr open_attr = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9056) 	struct bpf_program *prog, *first_prog = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9057) 	struct bpf_object *obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9058) 	struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9059) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9061) 	if (!attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9062) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9063) 	if (!attr->file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9064) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9066) 	open_attr.file = attr->file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9067) 	open_attr.prog_type = attr->prog_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9069) 	obj = bpf_object__open_xattr(&open_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9070) 	if (IS_ERR_OR_NULL(obj))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9071) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9073) 	bpf_object__for_each_program(prog, obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9074) 		enum bpf_attach_type attach_type = attr->expected_attach_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9075) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9076) 		 * to preserve backwards compatibility, bpf_prog_load treats
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9077) 		 * attr->prog_type, if specified, as an override to whatever
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9078) 		 * bpf_object__open guessed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9079) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9080) 		if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9081) 			bpf_program__set_type(prog, attr->prog_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9082) 			bpf_program__set_expected_attach_type(prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9083) 							      attach_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9084) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9085) 		if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9086) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9087) 			 * we haven't guessed from section name and user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9088) 			 * didn't provide a fallback type, too bad...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9089) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9090) 			bpf_object__close(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9091) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9092) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9094) 		prog->prog_ifindex = attr->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9095) 		prog->log_level = attr->log_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9096) 		prog->prog_flags |= attr->prog_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9097) 		if (!first_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9098) 			first_prog = prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9099) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9101) 	bpf_object__for_each_map(map, obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9102) 		if (!bpf_map__is_offload_neutral(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9103) 			map->map_ifindex = attr->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9104) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9106) 	if (!first_prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9107) 		pr_warn("object file doesn't contain bpf program\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9108) 		bpf_object__close(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9109) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9110) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9112) 	err = bpf_object__load(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9113) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9114) 		bpf_object__close(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9115) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9116) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9118) 	*pobj = obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9119) 	*prog_fd = bpf_program__fd(first_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9120) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9121) }
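
/*
 * A minimal usage sketch (assumed caller code; the object file name "prog.o"
 * and the chosen program type are illustrative only) for the legacy
 * bpf_prog_load() wrapper above:
 *
 *	struct bpf_object *obj;
 *	int prog_fd, err;
 *
 *	err = bpf_prog_load("prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd);
 *	if (err)
 *		return err;
 *	bpf_object__close(obj);
 *
 * prog_fd refers to the first program found in the object; obj owns the rest.
 * Passing BPF_PROG_TYPE_UNSPEC instead would keep whatever type
 * bpf_object__open() guessed from the ELF section names.
 */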
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9123) struct bpf_link {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9124) 	int (*detach)(struct bpf_link *link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9125) 	int (*destroy)(struct bpf_link *link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9126) 	char *pin_path;		/* NULL, if not pinned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9127) 	int fd;			/* hook FD, -1 if not applicable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9128) 	bool disconnected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9129) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9131) /* Replace link's underlying BPF program with the new one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9132) int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9134) 	return bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9137) /* Release "ownership" of underlying BPF resource (typically, BPF program
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9138)  * attached to some BPF hook, e.g., tracepoint, kprobe, etc). Disconnected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9139)  * link, when destroyed through a bpf_link__destroy() call, won't attempt to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9140)  * detach/unregister that BPF resource. This is useful in situations where,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9141)  * say, the attached BPF program has to outlive the userspace program that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9142)  * attached it. Depending on the type of BPF program, though, there might be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9143)  * additional steps (like pinning the BPF program in BPF FS) necessary to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9144)  * ensure that the exit of the userspace program doesn't trigger automatic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9145)  * detachment and cleanup inside the kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9146)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9147) void bpf_link__disconnect(struct bpf_link *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9149) 	link->disconnected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9150) }
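
/*
 * Editor's sketch (not part of libbpf): the comment above describes why a
 * caller may want a BPF attachment to outlive its loader.  One minimal way to
 * do that for an FD-based link is to pin the link in bpffs and then
 * disconnect the local handle before destroying it.  The bpffs path and the
 * helper name are illustrative only; assumes <bpf/libbpf.h>.
 */
static int keep_attachment_alive(struct bpf_link *link)
{
	int err;

	/* keep a kernel-side reference by pinning the link in bpffs */
	err = bpf_link__pin(link, "/sys/fs/bpf/my_link");
	if (err)
		return err;

	/* don't detach on destroy; the pinned link keeps the program attached */
	bpf_link__disconnect(link);
	return bpf_link__destroy(link);
}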
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9152) int bpf_link__destroy(struct bpf_link *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9154) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9156) 	if (IS_ERR_OR_NULL(link))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9157) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9159) 	if (!link->disconnected && link->detach)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9160) 		err = link->detach(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9161) 	if (link->destroy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9162) 		link->destroy(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9163) 	if (link->pin_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9164) 		free(link->pin_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9165) 	free(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9167) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9170) int bpf_link__fd(const struct bpf_link *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9172) 	return link->fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9175) const char *bpf_link__pin_path(const struct bpf_link *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9177) 	return link->pin_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9180) static int bpf_link__detach_fd(struct bpf_link *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9182) 	return close(link->fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9185) struct bpf_link *bpf_link__open(const char *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9187) 	struct bpf_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9188) 	int fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9190) 	fd = bpf_obj_get(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9191) 	if (fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9192) 		fd = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9193) 		pr_warn("failed to open link at %s: %d\n", path, fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9194) 		return ERR_PTR(fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9195) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9197) 	link = calloc(1, sizeof(*link));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9198) 	if (!link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9199) 		close(fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9200) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9201) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9202) 	link->detach = &bpf_link__detach_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9203) 	link->fd = fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9205) 	link->pin_path = strdup(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9206) 	if (!link->pin_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9207) 		bpf_link__destroy(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9208) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9209) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9211) 	return link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9214) int bpf_link__detach(struct bpf_link *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9216) 	return bpf_link_detach(link->fd) ? -errno : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9219) int bpf_link__pin(struct bpf_link *link, const char *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9221) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9223) 	if (link->pin_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9224) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9225) 	err = make_parent_dir(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9226) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9227) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9228) 	err = check_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9229) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9230) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9232) 	link->pin_path = strdup(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9233) 	if (!link->pin_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9234) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9236) 	if (bpf_obj_pin(link->fd, link->pin_path)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9237) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9238) 		zfree(&link->pin_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9239) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9240) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9242) 	pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9243) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9246) int bpf_link__unpin(struct bpf_link *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9248) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9250) 	if (!link->pin_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9251) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9253) 	err = unlink(link->pin_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9254) 	if (err != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9255) 		return -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9257) 	pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9258) 	zfree(&link->pin_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9259) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9262) static int bpf_link__detach_perf_event(struct bpf_link *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9264) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9266) 	err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9267) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9268) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9270) 	close(link->fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9271) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9274) struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9275) 						int pfd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9277) 	char errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9278) 	struct bpf_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9279) 	int prog_fd, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9281) 	if (pfd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9282) 		pr_warn("prog '%s': invalid perf event FD %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9283) 			prog->name, pfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9284) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9285) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9286) 	prog_fd = bpf_program__fd(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9287) 	if (prog_fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9288) 		pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9289) 			prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9290) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9291) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9293) 	link = calloc(1, sizeof(*link));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9294) 	if (!link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9295) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9296) 	link->detach = &bpf_link__detach_perf_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9297) 	link->fd = pfd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9299) 	if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9300) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9301) 		free(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9302) 		pr_warn("prog '%s': failed to attach to pfd %d: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9303) 			prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9304) 		if (err == -EPROTO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9305) 			pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9306) 				prog->name, pfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9307) 		return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9308) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9309) 	if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9310) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9311) 		free(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9312) 		pr_warn("prog '%s': failed to enable pfd %d: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9313) 			prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9314) 		return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9315) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9316) 	return link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9317) }
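
/*
 * Editor's sketch (not part of libbpf): bpf_program__attach_perf_event()
 * expects the caller to have opened the perf event FD already.  Below is one
 * plausible setup, sampling the software CPU clock on a single CPU at 99 Hz;
 * all values are illustrative.  Relies on <linux/perf_event.h> and
 * <asm/unistd.h>, both already included by this file.
 */
static struct bpf_link *attach_cpu_clock_sampler(struct bpf_program *prog, int cpu)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.size = sizeof(attr),
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.sample_freq = 99,
		.freq = 1, /* sample_freq is a frequency, not a period */
	};
	int pfd;

	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, cpu,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0)
		return ERR_PTR(-errno);

	/* on failure the returned pointer encodes the error; caller closes pfd */
	return bpf_program__attach_perf_event(prog, pfd);
}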
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9319) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9320)  * this function is expected to parse an integer in the range of [0, 2^31-1]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9321)  * from the given file using scanf format string fmt. If the actual parsed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9322)  * value is negative, the result might be indistinguishable from an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9323)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9324) static int parse_uint_from_file(const char *file, const char *fmt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9326) 	char buf[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9327) 	int err, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9328) 	FILE *f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9330) 	f = fopen(file, "r");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9331) 	if (!f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9332) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9333) 		pr_debug("failed to open '%s': %s\n", file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9334) 			 libbpf_strerror_r(err, buf, sizeof(buf)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9335) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9336) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9337) 	err = fscanf(f, fmt, &ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9338) 	if (err != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9339) 		err = err == EOF ? -EIO : -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9340) 		pr_debug("failed to parse '%s': %s\n", file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9341) 			libbpf_strerror_r(err, buf, sizeof(buf)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9342) 		fclose(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9343) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9344) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9345) 	fclose(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9346) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9349) static int determine_kprobe_perf_type(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9351) 	const char *file = "/sys/bus/event_source/devices/kprobe/type";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9353) 	return parse_uint_from_file(file, "%d\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9356) static int determine_uprobe_perf_type(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9358) 	const char *file = "/sys/bus/event_source/devices/uprobe/type";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9360) 	return parse_uint_from_file(file, "%d\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9363) static int determine_kprobe_retprobe_bit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9365) 	const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9367) 	return parse_uint_from_file(file, "config:%d\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9370) static int determine_uprobe_retprobe_bit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9372) 	const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9374) 	return parse_uint_from_file(file, "config:%d\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9377) static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9378) 				 uint64_t offset, int pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9380) 	struct perf_event_attr attr = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9381) 	char errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9382) 	int type, pfd, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9384) 	type = uprobe ? determine_uprobe_perf_type()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9385) 		      : determine_kprobe_perf_type();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9386) 	if (type < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9387) 		pr_warn("failed to determine %s perf type: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9388) 			uprobe ? "uprobe" : "kprobe",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9389) 			libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9390) 		return type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9391) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9392) 	if (retprobe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9393) 		int bit = uprobe ? determine_uprobe_retprobe_bit()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9394) 				 : determine_kprobe_retprobe_bit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9396) 		if (bit < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9397) 			pr_warn("failed to determine %s retprobe bit: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9398) 				uprobe ? "uprobe" : "kprobe",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9399) 				libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9400) 			return bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9401) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9402) 		attr.config |= 1 << bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9403) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9404) 	attr.size = sizeof(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9405) 	attr.type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9406) 	attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9407) 	attr.config2 = offset;		 /* kprobe_addr or probe_offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9409) 	/* pid filter is meaningful only for uprobes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9410) 	pfd = syscall(__NR_perf_event_open, &attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9411) 		      pid < 0 ? -1 : pid /* pid */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9412) 		      pid == -1 ? 0 : -1 /* cpu */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9413) 		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9414) 	if (pfd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9415) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9416) 		pr_warn("%s perf_event_open() failed: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9417) 			uprobe ? "uprobe" : "kprobe",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9418) 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9419) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9420) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9421) 	return pfd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9424) struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9425) 					    bool retprobe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9426) 					    const char *func_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9428) 	char errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9429) 	struct bpf_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9430) 	int pfd, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9432) 	pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9433) 				    0 /* offset */, -1 /* pid */);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9434) 	if (pfd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9435) 		pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9436) 			prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9437) 			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9438) 		return ERR_PTR(pfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9439) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9440) 	link = bpf_program__attach_perf_event(prog, pfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9441) 	if (IS_ERR(link)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9442) 		close(pfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9443) 		err = PTR_ERR(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9444) 		pr_warn("prog '%s': failed to attach to %s '%s': %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9445) 			prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9446) 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9447) 		return link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9448) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9449) 	return link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9450) }
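
/*
 * Editor's sketch (not part of libbpf): a minimal open/load/attach flow for a
 * kprobe program.  The object path, program name and kernel function below
 * are hypothetical.  Note that in this libbpf version attach errors are
 * reported as ERR_PTR-encoded pointers, hence the libbpf_get_error() checks.
 */
static struct bpf_link *attach_unlinkat_kprobe(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;

	obj = bpf_object__open_file("kprobe_example.bpf.o", NULL);
	if (libbpf_get_error(obj))
		return NULL;
	if (bpf_object__load(obj))
		goto err;

	prog = bpf_object__find_program_by_name(obj, "handle_do_unlinkat");
	if (!prog)
		goto err;

	link = bpf_program__attach_kprobe(prog, false /* !retprobe */,
					  "do_unlinkat");
	if (libbpf_get_error(link))
		goto err;

	/* obj is intentionally kept alive for the lifetime of the link */
	return link;
err:
	bpf_object__close(obj);
	return NULL;
}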
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9452) static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9453) 				      struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9455) 	const char *func_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9456) 	bool retprobe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9458) 	func_name = prog->sec_name + sec->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9459) 	retprobe = strcmp(sec->sec, "kretprobe/") == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9461) 	return bpf_program__attach_kprobe(prog, retprobe, func_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9464) struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9465) 					    bool retprobe, pid_t pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9466) 					    const char *binary_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9467) 					    size_t func_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9469) 	char errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9470) 	struct bpf_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9471) 	int pfd, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9473) 	pfd = perf_event_open_probe(true /* uprobe */, retprobe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9474) 				    binary_path, func_offset, pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9475) 	if (pfd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9476) 		pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9477) 			prog->name, retprobe ? "uretprobe" : "uprobe",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9478) 			binary_path, func_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9479) 			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9480) 		return ERR_PTR(pfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9481) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9482) 	link = bpf_program__attach_perf_event(prog, pfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9483) 	if (IS_ERR(link)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9484) 		close(pfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9485) 		err = PTR_ERR(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9486) 		pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9487) 			prog->name, retprobe ? "uretprobe" : "uprobe",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9488) 			binary_path, func_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9489) 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9490) 		return link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9491) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9492) 	return link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9493) }
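
/*
 * Editor's sketch (not part of libbpf): attaching a uprobe requires the
 * caller to resolve the function's file offset inside the binary (e.g. from
 * its ELF symbol table) beforehand; this libbpf version does no symbol lookup
 * of its own.  The binary path and offset below are placeholders.
 */
static struct bpf_link *attach_malloc_uprobe(struct bpf_program *prog,
					     size_t malloc_offset)
{
	/* pid = -1: trace every process that maps this binary */
	return bpf_program__attach_uprobe(prog, false /* !retprobe */,
					  -1 /* pid */, "/usr/lib/libc.so.6",
					  malloc_offset);
}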
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9495) static int determine_tracepoint_id(const char *tp_category,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9496) 				   const char *tp_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9498) 	char file[PATH_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9499) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9501) 	ret = snprintf(file, sizeof(file),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9502) 		       "/sys/kernel/debug/tracing/events/%s/%s/id",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9503) 		       tp_category, tp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9504) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9505) 		return -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9506) 	if (ret >= sizeof(file)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9507) 		pr_debug("tracepoint %s/%s path is too long\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9508) 			 tp_category, tp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9509) 		return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9511) 	return parse_uint_from_file(file, "%d\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9514) static int perf_event_open_tracepoint(const char *tp_category,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9515) 				      const char *tp_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9517) 	struct perf_event_attr attr = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9518) 	char errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9519) 	int tp_id, pfd, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9521) 	tp_id = determine_tracepoint_id(tp_category, tp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9522) 	if (tp_id < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9523) 		pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9524) 			tp_category, tp_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9525) 			libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9526) 		return tp_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9527) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9529) 	attr.type = PERF_TYPE_TRACEPOINT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9530) 	attr.size = sizeof(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9531) 	attr.config = tp_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9533) 	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9534) 		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9535) 	if (pfd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9536) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9537) 		pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9538) 			tp_category, tp_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9539) 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9540) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9541) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9542) 	return pfd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9545) struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9546) 						const char *tp_category,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9547) 						const char *tp_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9549) 	char errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9550) 	struct bpf_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9551) 	int pfd, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9553) 	pfd = perf_event_open_tracepoint(tp_category, tp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9554) 	if (pfd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9555) 		pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9556) 			prog->name, tp_category, tp_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9557) 			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9558) 		return ERR_PTR(pfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9559) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9560) 	link = bpf_program__attach_perf_event(prog, pfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9561) 	if (IS_ERR(link)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9562) 		close(pfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9563) 		err = PTR_ERR(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9564) 		pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9565) 			prog->name, tp_category, tp_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9566) 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9567) 		return link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9568) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9569) 	return link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9570) }
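
/*
 * Editor's sketch (not part of libbpf): the category/name pair maps directly
 * onto /sys/kernel/debug/tracing/events/<category>/<name>; the tracepoint
 * below is just a common example and the program is assumed to be loaded.
 */
static struct bpf_link *attach_execve_tracepoint(struct bpf_program *prog)
{
	return bpf_program__attach_tracepoint(prog, "syscalls",
					      "sys_enter_execve");
}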
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9572) static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9573) 				  struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9575) 	char *sec_name, *tp_cat, *tp_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9576) 	struct bpf_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9578) 	sec_name = strdup(prog->sec_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9579) 	if (!sec_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9580) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9582) 	/* extract "tp/<category>/<name>" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9583) 	tp_cat = sec_name + sec->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9584) 	tp_name = strchr(tp_cat, '/');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9585) 	if (!tp_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9586) 		link = ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9587) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9588) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9589) 	*tp_name = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9590) 	tp_name++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9592) 	link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9593) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9594) 	free(sec_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9595) 	return link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9598) struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9599) 						    const char *tp_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9601) 	char errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9602) 	struct bpf_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9603) 	int prog_fd, pfd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9605) 	prog_fd = bpf_program__fd(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9606) 	if (prog_fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9607) 		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9608) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9609) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9611) 	link = calloc(1, sizeof(*link));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9612) 	if (!link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9613) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9614) 	link->detach = &bpf_link__detach_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9616) 	pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9617) 	if (pfd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9618) 		pfd = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9619) 		free(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9620) 		pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9621) 			prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9622) 		return ERR_PTR(pfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9623) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9624) 	link->fd = pfd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9625) 	return link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9626) }
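
/*
 * Editor's sketch (not part of libbpf): raw tracepoints are attached by name
 * only and resolved by the kernel; "sched_switch" is merely an example.
 */
static struct bpf_link *attach_sched_switch_raw_tp(struct bpf_program *prog)
{
	return bpf_program__attach_raw_tracepoint(prog, "sched_switch");
}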
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9628) static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9629) 				      struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9631) 	const char *tp_name = prog->sec_name + sec->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9633) 	return bpf_program__attach_raw_tracepoint(prog, tp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9636) /* Common logic for all BPF program types that attach to a btf_id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9637) static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9639) 	char errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9640) 	struct bpf_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9641) 	int prog_fd, pfd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9643) 	prog_fd = bpf_program__fd(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9644) 	if (prog_fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9645) 		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9646) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9647) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9649) 	link = calloc(1, sizeof(*link));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9650) 	if (!link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9651) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9652) 	link->detach = &bpf_link__detach_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9654) 	pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9655) 	if (pfd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9656) 		pfd = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9657) 		free(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9658) 		pr_warn("prog '%s': failed to attach: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9659) 			prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9660) 		return ERR_PTR(pfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9661) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9662) 	link->fd = pfd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9663) 	return link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9666) struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9668) 	return bpf_program__attach_btf_id(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9671) struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9673) 	return bpf_program__attach_btf_id(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9676) static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9677) 				     struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9679) 	return bpf_program__attach_trace(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9682) static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9683) 				   struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9685) 	return bpf_program__attach_lsm(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9688) static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9689) 				    struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9691) 	return bpf_program__attach_iter(prog, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9694) static struct bpf_link *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9695) bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9696) 		       const char *target_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9698) 	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9699) 			    .target_btf_id = btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9700) 	enum bpf_attach_type attach_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9701) 	char errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9702) 	struct bpf_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9703) 	int prog_fd, link_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9705) 	prog_fd = bpf_program__fd(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9706) 	if (prog_fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9707) 		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9708) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9709) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9711) 	link = calloc(1, sizeof(*link));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9712) 	if (!link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9713) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9714) 	link->detach = &bpf_link__detach_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9716) 	attach_type = bpf_program__get_expected_attach_type(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9717) 	link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9718) 	if (link_fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9719) 		link_fd = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9720) 		free(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9721) 		pr_warn("prog '%s': failed to attach to %s: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9722) 			prog->name, target_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9723) 			libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9724) 		return ERR_PTR(link_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9725) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9726) 	link->fd = link_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9727) 	return link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9730) struct bpf_link *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9731) bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9733) 	return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9736) struct bpf_link *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9737) bpf_program__attach_netns(struct bpf_program *prog, int netns_fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9739) 	return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9742) struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int ifindex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9744) 	/* target_fd/target_ifindex use the same field in LINK_CREATE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9745) 	return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9746) }
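
/*
 * Editor's sketch (not part of libbpf): for XDP the "target" passed through
 * LINK_CREATE is an interface index rather than an FD.  The interface name is
 * a placeholder; assumes <net/if.h> for if_nametoindex().
 */
static struct bpf_link *attach_xdp_on_eth0(struct bpf_program *prog)
{
	int ifindex = if_nametoindex("eth0");

	if (!ifindex)
		return ERR_PTR(-errno);
	return bpf_program__attach_xdp(prog, ifindex);
}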
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9748) struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9749) 					      int target_fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9750) 					      const char *attach_func_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9752) 	int btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9754) 	if (!!target_fd != !!attach_func_name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9755) 		pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9756) 			prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9757) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9758) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9760) 	if (prog->type != BPF_PROG_TYPE_EXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9761) 		pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9762) 			prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9763) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9764) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9766) 	if (target_fd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9767) 		btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9768) 		if (btf_id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9769) 			return ERR_PTR(btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9771) 		return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9772) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9773) 		/* no target, so use raw_tracepoint_open for compatibility
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9774) 		 * with old kernels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9775) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9776) 		return bpf_program__attach_trace(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9777) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9778) }
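
/*
 * Editor's sketch (not part of libbpf): freplace attaches a BPF_PROG_TYPE_EXT
 * program over a BTF-described function of an already-loaded target program.
 * The target program ID and function name below are hypothetical.
 */
static struct bpf_link *replace_target_func(struct bpf_program *ext_prog,
					    __u32 target_prog_id)
{
	int target_fd = bpf_prog_get_fd_by_id(target_prog_id);

	if (target_fd < 0)
		return ERR_PTR(-errno);
	return bpf_program__attach_freplace(ext_prog, target_fd,
					    "xdp_process_packet");
}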
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9780) struct bpf_link *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9781) bpf_program__attach_iter(struct bpf_program *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9782) 			 const struct bpf_iter_attach_opts *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9784) 	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9785) 	char errmsg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9786) 	struct bpf_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9787) 	int prog_fd, link_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9788) 	__u32 target_fd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9790) 	if (!OPTS_VALID(opts, bpf_iter_attach_opts))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9791) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9793) 	link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9794) 	link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9796) 	prog_fd = bpf_program__fd(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9797) 	if (prog_fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9798) 		pr_warn("prog '%s': can't attach before loaded\n", prog->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9799) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9800) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9802) 	link = calloc(1, sizeof(*link));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9803) 	if (!link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9804) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9805) 	link->detach = &bpf_link__detach_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9807) 	link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9808) 				  &link_create_opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9809) 	if (link_fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9810) 		link_fd = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9811) 		free(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9812) 		pr_warn("prog '%s': failed to attach to iterator: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9813) 			prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9814) 		return ERR_PTR(link_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9816) 	link->fd = link_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9817) 	return link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9818) }
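
/*
 * Editor's sketch (not part of libbpf): after attaching an iterator program,
 * its output is consumed by creating an iterator FD from the link and
 * read()ing it until EOF.  Error handling is trimmed for brevity.
 */
static void dump_iter_output(struct bpf_program *prog)
{
	struct bpf_link *link;
	char buf[4096];
	ssize_t len;
	int iter_fd;

	link = bpf_program__attach_iter(prog, NULL);
	if (libbpf_get_error(link))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd >= 0) {
		while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, len, stdout);
		close(iter_fd);
	}
	bpf_link__destroy(link);
}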
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9820) struct bpf_link *bpf_program__attach(struct bpf_program *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9822) 	const struct bpf_sec_def *sec_def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9824) 	sec_def = find_sec_def(prog->sec_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9825) 	if (!sec_def || !sec_def->attach_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9826) 		return ERR_PTR(-ESRCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9828) 	return sec_def->attach_fn(sec_def, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9831) static int bpf_link__detach_struct_ops(struct bpf_link *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9833) 	__u32 zero = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9835) 	if (bpf_map_delete_elem(link->fd, &zero))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9836) 		return -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9838) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9841) struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9843) 	struct bpf_struct_ops *st_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9844) 	struct bpf_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9845) 	__u32 i, zero = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9846) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9848) 	if (!bpf_map__is_struct_ops(map) || map->fd == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9849) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9851) 	link = calloc(1, sizeof(*link));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9852) 	if (!link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9853) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9855) 	st_ops = map->st_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9856) 	for (i = 0; i < btf_vlen(st_ops->type); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9857) 		struct bpf_program *prog = st_ops->progs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9858) 		void *kern_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9859) 		int prog_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9861) 		if (!prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9862) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9864) 		prog_fd = bpf_program__fd(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9865) 		kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9866) 		*(unsigned long *)kern_data = prog_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9867) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9869) 	err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9870) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9871) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9872) 		free(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9873) 		return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9874) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9876) 	link->detach = bpf_link__detach_struct_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9877) 	link->fd = map->fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9879) 	return link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9880) }
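
/*
 * Editor's sketch (not part of libbpf): struct_ops maps are registered after
 * the object is loaded.  The map name "dctcp" is hypothetical (e.g. a TCP
 * congestion-control implementation declared in a SEC(".struct_ops") map).
 */
static struct bpf_link *register_struct_ops(struct bpf_object *obj)
{
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "dctcp");

	if (!map)
		return ERR_PTR(-ENOENT);
	return bpf_map__attach_struct_ops(map);
}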
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9882) enum bpf_perf_event_ret
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9883) bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9884) 			   void **copy_mem, size_t *copy_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9885) 			   bpf_perf_event_print_t fn, void *private_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9887) 	struct perf_event_mmap_page *header = mmap_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9888) 	__u64 data_head = ring_buffer_read_head(header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9889) 	__u64 data_tail = header->data_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9890) 	void *base = ((__u8 *)header) + page_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9891) 	int ret = LIBBPF_PERF_EVENT_CONT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9892) 	struct perf_event_header *ehdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9893) 	size_t ehdr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9895) 	while (data_head != data_tail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9896) 		ehdr = base + (data_tail & (mmap_size - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9897) 		ehdr_size = ehdr->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9899) 		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9900) 			void *copy_start = ehdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9901) 			size_t len_first = base + mmap_size - copy_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9902) 			size_t len_secnd = ehdr_size - len_first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9904) 			if (*copy_size < ehdr_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9905) 				free(*copy_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9906) 				*copy_mem = malloc(ehdr_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9907) 				if (!*copy_mem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9908) 					*copy_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9909) 					ret = LIBBPF_PERF_EVENT_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9910) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9911) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9912) 				*copy_size = ehdr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9913) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9915) 			memcpy(*copy_mem, copy_start, len_first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9916) 			memcpy(*copy_mem + len_first, base, len_secnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9917) 			ehdr = *copy_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9918) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9920) 		ret = fn(ehdr, private_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9921) 		data_tail += ehdr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9922) 		if (ret != LIBBPF_PERF_EVENT_CONT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9923) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9924) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9926) 	ring_buffer_write_tail(header, data_tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9927) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9929) 
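/*
 * Usage sketch (editorial addition, not part of libbpf): one plausible way a
 * caller could drive bpf_perf_event_read_simple() over a perf ring it set up
 * itself. The perf_event_open() + mmap() step, the 64-page data area and the
 * print_ehdr/drain_ring names are illustrative assumptions; the headers used
 * below are already included by this file.
 */
#if 0
static enum bpf_perf_event_ret
print_ehdr(struct perf_event_header *ehdr, void *private_data)
{
	/* private_data is whatever the caller passed below; unused here */
	fprintf(stderr, "perf record: type=%u size=%u\n",
		ehdr->type, ehdr->size);
	return LIBBPF_PERF_EVENT_CONT;	/* keep consuming records */
}

static int drain_ring(void *ring_base)	/* mmap()'ed header page + data */
{
	size_t page_size = sysconf(_SC_PAGESIZE);
	size_t mmap_size = 64 * page_size;	/* data area, power of two */
	void *copy_mem = NULL;	/* scratch buffer for records that wrap */
	size_t copy_size = 0;
	enum bpf_perf_event_ret ret;

	ret = bpf_perf_event_read_simple(ring_base, mmap_size, page_size,
					 &copy_mem, &copy_size,
					 print_ehdr, NULL);
	free(copy_mem);	/* allocated on demand by the helper, caller frees */
	return ret == LIBBPF_PERF_EVENT_ERROR ? -1 : 0;
}
#endif
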
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9930) struct perf_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9932) struct perf_buffer_params {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9933) 	struct perf_event_attr *attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9934) 	/* if event_cb is specified, it takes precedence */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9935) 	perf_buffer_event_fn event_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9936) 	/* sample_cb and lost_cb are higher-level common-case callbacks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9937) 	perf_buffer_sample_fn sample_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9938) 	perf_buffer_lost_fn lost_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9939) 	void *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9940) 	int cpu_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9941) 	int *cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9942) 	int *map_keys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9943) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9945) struct perf_cpu_buf {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9946) 	struct perf_buffer *pb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9947) 	void *base; /* mmap()'ed memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9948) 	void *buf; /* for reconstructing segmented data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9949) 	size_t buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9950) 	int fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9951) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9952) 	int map_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9953) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9955) struct perf_buffer {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9956) 	perf_buffer_event_fn event_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9957) 	perf_buffer_sample_fn sample_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9958) 	perf_buffer_lost_fn lost_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9959) 	void *ctx; /* passed into callbacks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9961) 	size_t page_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9962) 	size_t mmap_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9963) 	struct perf_cpu_buf **cpu_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9964) 	struct epoll_event *events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9965) 	int cpu_cnt; /* number of allocated CPU buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9966) 	int epoll_fd; /* perf event FD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9967) 	int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9968) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9970) static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9971) 				      struct perf_cpu_buf *cpu_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9973) 	if (!cpu_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9974) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9975) 	if (cpu_buf->base &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9976) 	    munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9977) 		pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9978) 	if (cpu_buf->fd >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9979) 		ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9980) 		close(cpu_buf->fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9981) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9982) 	free(cpu_buf->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9983) 	free(cpu_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9986) void perf_buffer__free(struct perf_buffer *pb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9988) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9990) 	if (IS_ERR_OR_NULL(pb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9991) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9992) 	if (pb->cpu_bufs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9993) 		for (i = 0; i < pb->cpu_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9994) 			struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9996) 			if (!cpu_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9997) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9999) 			bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10000) 			perf_buffer__free_cpu_buf(pb, cpu_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10001) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10002) 		free(pb->cpu_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10003) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10004) 	if (pb->epoll_fd >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10005) 		close(pb->epoll_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10006) 	free(pb->events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10007) 	free(pb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10010) static struct perf_cpu_buf *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10011) perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10012) 			  int cpu, int map_key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10014) 	struct perf_cpu_buf *cpu_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10015) 	char msg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10016) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10018) 	cpu_buf = calloc(1, sizeof(*cpu_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10019) 	if (!cpu_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10020) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10022) 	cpu_buf->pb = pb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10023) 	cpu_buf->cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10024) 	cpu_buf->map_key = map_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10026) 	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10027) 			      -1, PERF_FLAG_FD_CLOEXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10028) 	if (cpu_buf->fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10029) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10030) 		pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10031) 			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10032) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10033) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10035) 	cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10036) 			     PROT_READ | PROT_WRITE, MAP_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10037) 			     cpu_buf->fd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10038) 	if (cpu_buf->base == MAP_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10039) 		cpu_buf->base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10040) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10041) 		pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10042) 			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10043) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10044) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10046) 	if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10047) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10048) 		pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10049) 			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10050) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10051) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10053) 	return cpu_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10055) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10056) 	perf_buffer__free_cpu_buf(pb, cpu_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10057) 	return (struct perf_cpu_buf *)ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10060) static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10061) 					      struct perf_buffer_params *p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10063) struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10064) 				     const struct perf_buffer_opts *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10066) 	struct perf_buffer_params p = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10067) 	struct perf_event_attr attr = { 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10069) 	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10070) 	attr.type = PERF_TYPE_SOFTWARE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10071) 	attr.sample_type = PERF_SAMPLE_RAW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10072) 	attr.sample_period = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10073) 	attr.wakeup_events = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10075) 	p.attr = &attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10076) 	p.sample_cb = opts ? opts->sample_cb : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10077) 	p.lost_cb = opts ? opts->lost_cb : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10078) 	p.ctx = opts ? opts->ctx : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10080) 	return __perf_buffer__new(map_fd, page_cnt, &p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10082) 
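/*
 * Usage sketch (editorial addition, not part of libbpf): a typical
 * perf_buffer__new() call with the opts fields this constructor reads
 * (sample_cb, lost_cb, ctx). The map FD is assumed to come from an already
 * created BPF_MAP_TYPE_PERF_EVENT_ARRAY map; callback and function names are
 * illustrative.
 */
#if 0
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* data/size describe one raw sample, as emitted from the BPF side
	 * via bpf_perf_event_output()
	 */
	fprintf(stderr, "cpu %d: got %u bytes\n", cpu, size);
}

static void on_lost(void *ctx, int cpu, __u64 cnt)
{
	fprintf(stderr, "cpu %d: lost %llu samples\n",
		cpu, (unsigned long long)cnt);
}

static struct perf_buffer *setup_pb(int perf_map_fd)
{
	struct perf_buffer_opts pb_opts = {
		.sample_cb = on_sample,
		.lost_cb = on_lost,
		.ctx = NULL,
	};
	struct perf_buffer *pb;

	/* 8 pages of ring per CPU; page_cnt must be a power of two */
	pb = perf_buffer__new(perf_map_fd, 8, &pb_opts);

	/* errors come back ERR_PTR-encoded; check with libbpf_get_error() */
	if (libbpf_get_error(pb))
		return NULL;
	return pb;
}
#endif
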
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10083) struct perf_buffer *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10084) perf_buffer__new_raw(int map_fd, size_t page_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10085) 		     const struct perf_buffer_raw_opts *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10087) 	struct perf_buffer_params p = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10089) 	p.attr = opts->attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10090) 	p.event_cb = opts->event_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10091) 	p.ctx = opts->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10092) 	p.cpu_cnt = opts->cpu_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10093) 	p.cpus = opts->cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10094) 	p.map_keys = opts->map_keys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10096) 	return __perf_buffer__new(map_fd, page_cnt, &p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10098) 
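/*
 * Usage sketch (editorial addition, not part of libbpf): the raw variant for
 * callers that want their own perf_event_attr, a low-level per-record
 * callback and explicit CPU/map-key lists, mirroring the fields this
 * constructor copies (attr, event_cb, ctx, cpu_cnt, cpus, map_keys). The
 * chosen attr values and the CPU/slot pairs are illustrative.
 */
#if 0
static enum bpf_perf_event_ret
raw_event_cb(void *ctx, int cpu, struct perf_event_header *event)
{
	/* full control: the caller parses PERF_RECORD_* records itself */
	return LIBBPF_PERF_EVENT_CONT;
}

static struct perf_buffer *setup_raw_pb(int perf_map_fd)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
		.sample_type = PERF_SAMPLE_RAW,
		.sample_period = 1,
		.wakeup_events = 1,
	};
	int cpus[] = { 0, 2 };		/* only CPUs 0 and 2 ... */
	int map_keys[] = { 0, 1 };	/* ... mapped to array slots 0 and 1 */
	struct perf_buffer_raw_opts raw_opts = {
		.attr = &attr,
		.event_cb = raw_event_cb,
		.ctx = NULL,
		.cpu_cnt = 2,
		.cpus = cpus,
		.map_keys = map_keys,
	};

	return perf_buffer__new_raw(perf_map_fd, 8, &raw_opts);
}
#endif
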
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10099) static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10100) 					      struct perf_buffer_params *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10102) 	const char *online_cpus_file = "/sys/devices/system/cpu/online";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10103) 	struct bpf_map_info map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10104) 	char msg[STRERR_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10105) 	struct perf_buffer *pb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10106) 	bool *online = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10107) 	__u32 map_info_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10108) 	int err, i, j, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10110) 	if (page_cnt & (page_cnt - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10111) 		pr_warn("page count should be power of two, but is %zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10112) 			page_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10113) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10114) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10116) 	/* best-effort sanity checks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10117) 	memset(&map, 0, sizeof(map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10118) 	map_info_len = sizeof(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10119) 	err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10120) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10121) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10122) 		/* if BPF_OBJ_GET_INFO_BY_FD is supported, it will return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10123) 		 * -EBADFD, -EFAULT, or -E2BIG on real error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10124) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10125) 		if (err != -EINVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10126) 			pr_warn("failed to get map info for map FD %d: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10127) 				map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10128) 			return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10129) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10130) 		pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10131) 			 map_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10132) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10133) 		if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10134) 			pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10135) 				map.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10136) 			return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10137) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10138) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10140) 	pb = calloc(1, sizeof(*pb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10141) 	if (!pb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10142) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10144) 	pb->event_cb = p->event_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10145) 	pb->sample_cb = p->sample_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10146) 	pb->lost_cb = p->lost_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10147) 	pb->ctx = p->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10149) 	pb->page_size = getpagesize();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10150) 	pb->mmap_size = pb->page_size * page_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10151) 	pb->map_fd = map_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10153) 	pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10154) 	if (pb->epoll_fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10155) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10156) 		pr_warn("failed to create epoll instance: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10157) 			libbpf_strerror_r(err, msg, sizeof(msg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10158) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10159) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10161) 	if (p->cpu_cnt > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10162) 		pb->cpu_cnt = p->cpu_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10163) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10164) 		pb->cpu_cnt = libbpf_num_possible_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10165) 		if (pb->cpu_cnt < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10166) 			err = pb->cpu_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10167) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10168) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10169) 		if (map.max_entries && map.max_entries < pb->cpu_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10170) 			pb->cpu_cnt = map.max_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10171) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10173) 	pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10174) 	if (!pb->events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10175) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10176) 		pr_warn("failed to allocate events: out of memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10177) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10178) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10179) 	pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10180) 	if (!pb->cpu_bufs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10181) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10182) 		pr_warn("failed to allocate buffers: out of memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10183) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10184) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10186) 	err = parse_cpu_mask_file(online_cpus_file, &online, &n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10187) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10188) 		pr_warn("failed to get online CPU mask: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10189) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10190) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10192) 	for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10193) 		struct perf_cpu_buf *cpu_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10194) 		int cpu, map_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10196) 		cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10197) 		map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10199) 		/* in case the user didn't explicitly request particular CPUs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10200) 		 * be attached to, skip offline/not present CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10201) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10202) 		if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10203) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10205) 		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10206) 		if (IS_ERR(cpu_buf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10207) 			err = PTR_ERR(cpu_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10208) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10209) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10211) 		pb->cpu_bufs[j] = cpu_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10213) 		err = bpf_map_update_elem(pb->map_fd, &map_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10214) 					  &cpu_buf->fd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10215) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10216) 			err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10217) 			pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10218) 				cpu, map_key, cpu_buf->fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10219) 				libbpf_strerror_r(err, msg, sizeof(msg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10220) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10221) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10223) 		pb->events[j].events = EPOLLIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10224) 		pb->events[j].data.ptr = cpu_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10225) 		if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10226) 			      &pb->events[j]) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10227) 			err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10228) 			pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10229) 				cpu, cpu_buf->fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10230) 				libbpf_strerror_r(err, msg, sizeof(msg)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10231) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10232) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10233) 		j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10234) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10235) 	pb->cpu_cnt = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10236) 	free(online);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10238) 	return pb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10240) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10241) 	free(online);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10242) 	if (pb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10243) 		perf_buffer__free(pb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10244) 	return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10247) struct perf_sample_raw {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10248) 	struct perf_event_header header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10249) 	uint32_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10250) 	char data[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10251) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10253) struct perf_sample_lost {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10254) 	struct perf_event_header header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10255) 	uint64_t id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10256) 	uint64_t lost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10257) 	uint64_t sample_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10258) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10260) static enum bpf_perf_event_ret
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10261) perf_buffer__process_record(struct perf_event_header *e, void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10263) 	struct perf_cpu_buf *cpu_buf = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10264) 	struct perf_buffer *pb = cpu_buf->pb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10265) 	void *data = e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10267) 	/* user wants full control over parsing perf event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10268) 	if (pb->event_cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10269) 		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10271) 	switch (e->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10272) 	case PERF_RECORD_SAMPLE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10273) 		struct perf_sample_raw *s = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10275) 		if (pb->sample_cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10276) 			pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10277) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10278) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10279) 	case PERF_RECORD_LOST: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10280) 		struct perf_sample_lost *s = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10282) 		if (pb->lost_cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10283) 			pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10284) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10285) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10286) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10287) 		pr_warn("unknown perf sample type %d\n", e->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10288) 		return LIBBPF_PERF_EVENT_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10289) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10290) 	return LIBBPF_PERF_EVENT_CONT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10293) static int perf_buffer__process_records(struct perf_buffer *pb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10294) 					struct perf_cpu_buf *cpu_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10296) 	enum bpf_perf_event_ret ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10298) 	ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10299) 					 pb->page_size, &cpu_buf->buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10300) 					 &cpu_buf->buf_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10301) 					 perf_buffer__process_record, cpu_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10302) 	if (ret != LIBBPF_PERF_EVENT_CONT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10303) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10304) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10307) int perf_buffer__epoll_fd(const struct perf_buffer *pb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10309) 	return pb->epoll_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10312) int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10314) 	int i, cnt, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10316) 	cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10317) 	for (i = 0; i < cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10318) 		struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10320) 		err = perf_buffer__process_records(pb, cpu_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10321) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10322) 			pr_warn("error while processing records: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10323) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10324) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10325) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10326) 	return cnt < 0 ? -errno : cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10328) 
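/*
 * Usage sketch (editorial addition, not part of libbpf): a minimal event loop
 * around perf_buffer__poll(). The callbacks registered at construction time
 * run from inside the poll call; the stop flag and the 100 ms timeout are
 * illustrative.
 */
#if 0
static int poll_loop(struct perf_buffer *pb, const volatile bool *stop)
{
	int n;

	while (!*stop) {
		n = perf_buffer__poll(pb, 100 /* timeout_ms */);
		if (n < 0 && n != -EINTR)
			return n;	/* negative errno on real failure */
		/* n >= 0 is the number of rings that had data; 0 = timeout */
	}
	return 0;
}
#endif
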
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10329) /* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10330)  * manager.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10331)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10332) size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10334) 	return pb->cpu_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10337) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10338)  * Return perf_event FD of a ring buffer in *buf_idx* slot of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10339)  * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10340)  * select()/poll()/epoll() Linux syscalls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10341)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10342) int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10344) 	struct perf_cpu_buf *cpu_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10346) 	if (buf_idx >= pb->cpu_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10347) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10349) 	cpu_buf = pb->cpu_bufs[buf_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10350) 	if (!cpu_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10351) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10353) 	return cpu_buf->fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10356) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10357)  * Consume data from perf ring buffer corresponding to slot *buf_idx* in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10358)  * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10359)  * consume, do nothing and return success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10360)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10361)  *   - 0 on success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10362)  *   - <0 on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10363)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10364) int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10366) 	struct perf_cpu_buf *cpu_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10368) 	if (buf_idx >= pb->cpu_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10369) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10371) 	cpu_buf = pb->cpu_bufs[buf_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10372) 	if (!cpu_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10373) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10375) 	return perf_buffer__process_records(pb, cpu_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10377) 
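/*
 * Usage sketch (editorial addition, not part of libbpf): instead of
 * perf_buffer__poll(), a caller may pull the per-ring FDs out with
 * perf_buffer__buffer_fd() and wire them into its own epoll loop, then call
 * perf_buffer__consume_buffer() for whichever slot becomes readable. The
 * epoll bookkeeping below (my_epoll_fd, slot index stashed in data.u64) is
 * illustrative.
 */
#if 0
static int add_rings_to_epoll(struct perf_buffer *pb, int my_epoll_fd)
{
	size_t i, n = perf_buffer__buffer_cnt(pb);

	for (i = 0; i < n; i++) {
		struct epoll_event ev = {
			.events = EPOLLIN,
			.data.u64 = i,	/* remember which slot this is */
		};
		int fd = perf_buffer__buffer_fd(pb, i);

		if (fd < 0)
			return fd;
		if (epoll_ctl(my_epoll_fd, EPOLL_CTL_ADD, fd, &ev) < 0)
			return -errno;
	}
	return 0;
}

/* called when the caller's epoll loop reports slot `idx` readable */
static int on_slot_ready(struct perf_buffer *pb, size_t idx)
{
	return perf_buffer__consume_buffer(pb, idx);	/* 0 or negative errno */
}
#endif
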
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10378) int perf_buffer__consume(struct perf_buffer *pb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10380) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10382) 	for (i = 0; i < pb->cpu_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10383) 		struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10385) 		if (!cpu_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10386) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10388) 		err = perf_buffer__process_records(pb, cpu_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10389) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10390) 			pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10391) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10392) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10393) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10394) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10397) struct bpf_prog_info_array_desc {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10398) 	int	array_offset;	/* e.g. offset of jited_prog_insns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10399) 	int	count_offset;	/* e.g. offset of jited_prog_len */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10400) 	int	size_offset;	/* > 0: offset of rec size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10401) 				 * < 0: fixed size of -size_offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10402) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10403) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10405) static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10406) 	[BPF_PROG_INFO_JITED_INSNS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10407) 		offsetof(struct bpf_prog_info, jited_prog_insns),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10408) 		offsetof(struct bpf_prog_info, jited_prog_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10409) 		-1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10410) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10411) 	[BPF_PROG_INFO_XLATED_INSNS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10412) 		offsetof(struct bpf_prog_info, xlated_prog_insns),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10413) 		offsetof(struct bpf_prog_info, xlated_prog_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10414) 		-1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10415) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10416) 	[BPF_PROG_INFO_MAP_IDS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10417) 		offsetof(struct bpf_prog_info, map_ids),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10418) 		offsetof(struct bpf_prog_info, nr_map_ids),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10419) 		-(int)sizeof(__u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10420) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10421) 	[BPF_PROG_INFO_JITED_KSYMS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10422) 		offsetof(struct bpf_prog_info, jited_ksyms),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10423) 		offsetof(struct bpf_prog_info, nr_jited_ksyms),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10424) 		-(int)sizeof(__u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10425) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10426) 	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10427) 		offsetof(struct bpf_prog_info, jited_func_lens),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10428) 		offsetof(struct bpf_prog_info, nr_jited_func_lens),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10429) 		-(int)sizeof(__u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10430) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10431) 	[BPF_PROG_INFO_FUNC_INFO] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10432) 		offsetof(struct bpf_prog_info, func_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10433) 		offsetof(struct bpf_prog_info, nr_func_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10434) 		offsetof(struct bpf_prog_info, func_info_rec_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10435) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10436) 	[BPF_PROG_INFO_LINE_INFO] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10437) 		offsetof(struct bpf_prog_info, line_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10438) 		offsetof(struct bpf_prog_info, nr_line_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10439) 		offsetof(struct bpf_prog_info, line_info_rec_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10440) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10441) 	[BPF_PROG_INFO_JITED_LINE_INFO] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10442) 		offsetof(struct bpf_prog_info, jited_line_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10443) 		offsetof(struct bpf_prog_info, nr_jited_line_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10444) 		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10445) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10446) 	[BPF_PROG_INFO_PROG_TAGS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10447) 		offsetof(struct bpf_prog_info, prog_tags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10448) 		offsetof(struct bpf_prog_info, nr_prog_tags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10449) 		-(int)sizeof(__u8) * BPF_TAG_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10450) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10452) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10454) static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10455) 					   int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10457) 	__u32 *array = (__u32 *)info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10459) 	if (offset >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10460) 		return array[offset / sizeof(__u32)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10461) 	return -(int)offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10464) static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10465) 					   int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10467) 	__u64 *array = (__u64 *)info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10469) 	if (offset >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10470) 		return array[offset / sizeof(__u64)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10471) 	return -(int)offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10474) static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10475) 					 __u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10477) 	__u32 *array = (__u32 *)info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10479) 	if (offset >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10480) 		array[offset / sizeof(__u32)] = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10483) static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10484) 					 __u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10486) 	__u64 *array = (__u64 *)info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10488) 	if (offset >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10489) 		array[offset / sizeof(__u64)] = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10491) 
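/*
 * Worked example (editorial addition, not part of libbpf) of the offset
 * encoding used by bpf_prog_info_array_desc and the accessors above: a
 * size_offset >= 0 names a bpf_prog_info field holding the per-record size,
 * while a negative size_offset stores the fixed record size directly (as its
 * negation).
 */
#if 0
static void offset_encoding_example(struct bpf_prog_info *info)
{
	struct bpf_prog_info_array_desc *d;
	__u32 size;

	/* BPF_PROG_INFO_MAP_IDS: size_offset is -(int)sizeof(__u32),
	 * so the read below yields the fixed record size, 4
	 */
	d = &bpf_prog_info_array_desc[BPF_PROG_INFO_MAP_IDS];
	size = bpf_prog_info_read_offset_u32(info, d->size_offset);

	/* BPF_PROG_INFO_FUNC_INFO: size_offset is >= 0, so the record
	 * size is read out of info->func_info_rec_size instead
	 */
	d = &bpf_prog_info_array_desc[BPF_PROG_INFO_FUNC_INFO];
	size = bpf_prog_info_read_offset_u32(info, d->size_offset);
	(void)size;
}
#endif
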
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10492) struct bpf_prog_info_linear *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10493) bpf_program__get_prog_info_linear(int fd, __u64 arrays)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10495) 	struct bpf_prog_info_linear *info_linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10496) 	struct bpf_prog_info info = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10497) 	__u32 info_len = sizeof(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10498) 	__u32 data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10499) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10500) 	void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10502) 	if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10503) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10505) 	/* step 1: get array dimensions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10506) 	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10507) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10508) 		pr_debug("can't get prog info: %s", strerror(errno));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10509) 		return ERR_PTR(-EFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10512) 	/* step 2: calculate total size of all arrays */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10513) 	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10514) 		bool include_array = (arrays & (1UL << i)) > 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10515) 		struct bpf_prog_info_array_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10516) 		__u32 count, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10518) 		desc = bpf_prog_info_array_desc + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10520) 		/* kernel is too old to support this field */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10521) 		if (info_len < desc->array_offset + sizeof(__u32) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10522) 		    info_len < desc->count_offset + sizeof(__u32) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10523) 		    (desc->size_offset > 0 && info_len < desc->size_offset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10524) 			include_array = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10526) 		if (!include_array) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10527) 			arrays &= ~(1UL << i);	/* clear the bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10528) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10529) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10531) 		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10532) 		size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10534) 		data_len += count * size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10535) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10537) 	/* step 3: allocate contiguous memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10538) 	data_len = roundup(data_len, sizeof(__u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10539) 	info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10540) 	if (!info_linear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10541) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10543) 	/* step 4: fill data to info_linear->info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10544) 	info_linear->arrays = arrays;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10545) 	memset(&info_linear->info, 0, sizeof(info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10546) 	ptr = info_linear->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10548) 	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10549) 		struct bpf_prog_info_array_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10550) 		__u32 count, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10552) 		if ((arrays & (1UL << i)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10553) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10555) 		desc  = bpf_prog_info_array_desc + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10556) 		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10557) 		size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10558) 		bpf_prog_info_set_offset_u32(&info_linear->info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10559) 					     desc->count_offset, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10560) 		bpf_prog_info_set_offset_u32(&info_linear->info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10561) 					     desc->size_offset, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10562) 		bpf_prog_info_set_offset_u64(&info_linear->info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10563) 					     desc->array_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10564) 					     ptr_to_u64(ptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10565) 		ptr += count * size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10566) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10568) 	/* step 5: call syscall again to get required arrays */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10569) 	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10570) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10571) 		pr_debug("can't get prog info: %s", strerror(errno));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10572) 		free(info_linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10573) 		return ERR_PTR(-EFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10574) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10576) 	/* step 6: verify the data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10577) 	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10578) 		struct bpf_prog_info_array_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10579) 		__u32 v1, v2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10581) 		if ((arrays & (1UL << i)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10582) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10584) 		desc = bpf_prog_info_array_desc + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10585) 		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10586) 		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10587) 						   desc->count_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10588) 		if (v1 != v2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10589) 			pr_warn("%s: mismatch in element count\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10591) 		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10592) 		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10593) 						   desc->size_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10594) 		if (v1 != v2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10595) 			pr_warn("%s: mismatch in rec size\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10596) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10598) 	/* step 7: update info_len and data_len */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10599) 	info_linear->info_len = sizeof(struct bpf_prog_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10600) 	info_linear->data_len = data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10602) 	return info_linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10604) 
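/*
 * Usage sketch (editorial addition, not part of libbpf): asking
 * bpf_program__get_prog_info_linear() for one array (map IDs here), then
 * reading it back through the pointer that step 4 above planted into
 * info.map_ids. The prog FD is assumed to come from elsewhere; the caller
 * owns and frees the returned blob.
 */
#if 0
static int dump_map_ids(int prog_fd)
{
	__u64 arrays = 1UL << BPF_PROG_INFO_MAP_IDS;
	struct bpf_prog_info_linear *info_linear;
	__u32 *map_ids;
	__u32 i;

	info_linear = bpf_program__get_prog_info_linear(prog_fd, arrays);
	if (libbpf_get_error(info_linear))
		return -1;

	/* info.map_ids holds a pointer into info_linear->data */
	map_ids = (__u32 *)(uintptr_t)info_linear->info.map_ids;
	for (i = 0; i < info_linear->info.nr_map_ids; i++)
		fprintf(stderr, "map id: %u\n", map_ids[i]);

	free(info_linear);
	return 0;
}
#endif
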
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10605) void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10607) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10609) 	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10610) 		struct bpf_prog_info_array_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10611) 		__u64 addr, offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10613) 		if ((info_linear->arrays & (1UL << i)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10614) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10616) 		desc = bpf_prog_info_array_desc + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10617) 		addr = bpf_prog_info_read_offset_u64(&info_linear->info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10618) 						     desc->array_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10619) 		offs = addr - ptr_to_u64(info_linear->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10620) 		bpf_prog_info_set_offset_u64(&info_linear->info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10621) 					     desc->array_offset, offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10622) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10625) void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10627) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10629) 	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10630) 		struct bpf_prog_info_array_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10631) 		__u64 addr, offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10633) 		if ((info_linear->arrays & (1UL << i)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10634) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10636) 		desc = bpf_prog_info_array_desc + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10637) 		offs = bpf_prog_info_read_offset_u64(&info_linear->info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10638) 						     desc->array_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10639) 		addr = offs + ptr_to_u64(info_linear->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10640) 		bpf_prog_info_set_offset_u64(&info_linear->info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10641) 					     desc->array_offset, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10642) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10644) 
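/*
 * Usage note (editorial addition, not part of libbpf): the two helpers above
 * exist so a bpf_prog_info_linear blob can be serialized and reloaded at a
 * different address; pointers are rewritten as offsets relative to data[]
 * before writing and turned back into pointers after reading. A hedged
 * round-trip sketch:
 */
#if 0
static void serialize_round_trip(struct bpf_prog_info_linear *info_linear)
{
	/* before writing the blob out: pointers -> offsets into data[] */
	bpf_program__bpil_addr_to_offs(info_linear);

	/* ... write/read the blob, possibly in a different process ... */

	/* after reading it back: offsets -> pointers into the new data[] */
	bpf_program__bpil_offs_to_addr(info_linear);
}
#endif
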
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10645) int bpf_program__set_attach_target(struct bpf_program *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10646) 				   int attach_prog_fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10647) 				   const char *attach_func_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10649) 	int btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10651) 	if (!prog || attach_prog_fd < 0 || !attach_func_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10652) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10654) 	if (attach_prog_fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10655) 		btf_id = libbpf_find_prog_btf_id(attach_func_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10656) 						 attach_prog_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10657) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10658) 		btf_id = libbpf_find_vmlinux_btf_id(attach_func_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10659) 						    prog->expected_attach_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10661) 	if (btf_id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10662) 		return btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10664) 	prog->attach_btf_id = btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10665) 	prog->attach_prog_fd = attach_prog_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10666) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10668) 
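/*
 * Usage sketch (editorial addition, not part of libbpf): re-pointing an
 * fentry/fexit-style program at a different target before load. Per the
 * checks above, attach_prog_fd == 0 resolves the name against vmlinux BTF,
 * while a positive prog FD resolves it against that program's BTF
 * (freplace/fentry on a subprogram). The object/program lookup and the
 * "handler" program name are illustrative.
 */
#if 0
static int retarget_fentry(struct bpf_object *obj, const char *kernel_func)
{
	struct bpf_program *prog;

	prog = bpf_object__find_program_by_name(obj, "handler");
	if (!prog)
		return -ENOENT;

	/* call after open but before load, so the resolved BTF ID is
	 * used when the program is loaded
	 */
	return bpf_program__set_attach_target(prog, 0, kernel_func);
}
#endif
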
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10669) int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10671) 	int err = 0, n, len, start, end = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10672) 	bool *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10674) 	*mask = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10675) 	*mask_sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10677) 	/* Each substring separated by ',' has the format \d+-\d+ or \d+ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10678) 	while (*s) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10679) 		if (*s == ',' || *s == '\n') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10680) 			s++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10681) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10682) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10683) 		n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10684) 		if (n <= 0 || n > 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10685) 			pr_warn("Failed to get CPU range %s: %d\n", s, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10686) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10687) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10688) 		} else if (n == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10689) 			end = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10690) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10691) 		if (start < 0 || start > end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10692) 			pr_warn("Invalid CPU range [%d,%d] in %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10693) 				start, end, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10694) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10695) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10696) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10697) 		tmp = realloc(*mask, end + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10698) 		if (!tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10699) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10700) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10701) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10702) 		*mask = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10703) 		memset(tmp + *mask_sz, 0, start - *mask_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10704) 		memset(tmp + start, 1, end - start + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10705) 		*mask_sz = end + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10706) 		s += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10707) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10708) 	if (!*mask_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10709) 		pr_warn("Empty CPU range\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10710) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10711) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10712) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10713) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10714) 	free(*mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10715) 	*mask = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10716) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10718) 
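/*
 * Illustrative sketch, not part of upstream libbpf: feeding a literal CPU
 * list to parse_cpu_mask_str() above. For the input "0-2,4" the resulting
 * mask is expected to be {true, true, true, false, true} with mask_sz == 5.
 */
__attribute__((unused))
static int example_parse_cpu_mask_str(void)
{
	bool *mask = NULL;
	int i, n = 0, err;

	err = parse_cpu_mask_str("0-2,4\n", &mask, &n);
	if (err)
		return err;

	for (i = 0; i < n; i++)
		printf("cpu%d: %s\n", i, mask[i] ? "set" : "unset");

	free(mask);
	return 0;
}
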
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10719) int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10721) 	int fd, err = 0, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10722) 	char buf[128];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10724) 	fd = open(fcpu, O_RDONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10725) 	if (fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10726) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10727) 		pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10728) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10729) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10730) 	len = read(fd, buf, sizeof(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10731) 	close(fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10732) 	if (len <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10733) 		err = len ? -errno : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10734) 		pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10735) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10736) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10737) 	if (len >= sizeof(buf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10738) 		pr_warn("CPU mask is too big in file %s\n", fcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10739) 		return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10740) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10741) 	buf[len] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10743) 	return parse_cpu_mask_str(buf, mask, mask_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10746) int libbpf_num_possible_cpus(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10748) 	static const char *fcpu = "/sys/devices/system/cpu/possible";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10749) 	static int cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10750) 	int err, n, i, tmp_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10751) 	bool *mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10753) 	tmp_cpus = READ_ONCE(cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10754) 	if (tmp_cpus > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10755) 		return tmp_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10757) 	err = parse_cpu_mask_file(fcpu, &mask, &n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10758) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10759) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10761) 	tmp_cpus = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10762) 	for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10763) 		if (mask[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10764) 			tmp_cpus++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10765) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10766) 	free(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10768) 	WRITE_ONCE(cpus, tmp_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10769) 	return tmp_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10771) 
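/*
 * Illustrative sketch, not part of upstream libbpf: the typical reason
 * callers need libbpf_num_possible_cpus() above -- sizing the value buffer
 * for a lookup in a per-CPU map, which returns one value slot per possible
 * CPU. The __u64 value type and the caller-provided map_fd/key are
 * assumptions made for this example.
 */
__attribute__((unused))
static int example_percpu_lookup(int map_fd, __u32 key)
{
	int i, n = libbpf_num_possible_cpus();
	__u64 *values, sum = 0;

	if (n < 0)
		return n;

	values = calloc(n, sizeof(*values));
	if (!values)
		return -ENOMEM;

	if (bpf_map_lookup_elem(map_fd, &key, values)) {
		free(values);
		return -errno;
	}

	for (i = 0; i < n; i++)
		sum += values[i];
	free(values);

	printf("example: summed per-CPU value for key %u: %llu\n",
	       key, (unsigned long long)sum);
	return 0;
}
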
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10772) int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10773) 			      const struct bpf_object_open_opts *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10775) 	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10776) 		.object_name = s->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10777) 	);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10778) 	struct bpf_object *obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10779) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10781) 	/* Attempt to preserve opts->object_name, unless overridden by user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10782) 	 * explicitly. Overwriting the object name for skeletons is discouraged,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10783) 	 * as it breaks global data maps, because they use the object name as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10784) 	 * their own map name prefix. When the skeleton is generated, bpftool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10785) 	 * assumes that this name will stay the same.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10786) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10787) 	if (opts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10788) 		memcpy(&skel_opts, opts, sizeof(*opts));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10789) 		if (!opts->object_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10790) 			skel_opts.object_name = s->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10791) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10793) 	obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10794) 	if (IS_ERR(obj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10795) 		pr_warn("failed to initialize skeleton BPF object '%s': %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10796) 			s->name, PTR_ERR(obj));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10797) 		return PTR_ERR(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10798) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10800) 	*s->obj = obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10802) 	for (i = 0; i < s->map_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10803) 		struct bpf_map **map = s->maps[i].map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10804) 		const char *name = s->maps[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10805) 		void **mmaped = s->maps[i].mmaped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10807) 		*map = bpf_object__find_map_by_name(obj, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10808) 		if (!*map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10809) 			pr_warn("failed to find skeleton map '%s'\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10810) 			return -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10811) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10813) 		/* externs shouldn't be pre-set up from user code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10814) 		if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10815) 			*mmaped = (*map)->mmaped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10816) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10818) 	for (i = 0; i < s->prog_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10819) 		struct bpf_program **prog = s->progs[i].prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10820) 		const char *name = s->progs[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10822) 		*prog = bpf_object__find_program_by_name(obj, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10823) 		if (!*prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10824) 			pr_warn("failed to find skeleton program '%s'\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10825) 			return -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10826) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10829) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10832) int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10834) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10836) 	err = bpf_object__load(*s->obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10837) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10838) 		pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10839) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10840) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10842) 	for (i = 0; i < s->map_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10843) 		struct bpf_map *map = *s->maps[i].map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10844) 		size_t mmap_sz = bpf_map_mmap_sz(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10845) 		int prot, map_fd = bpf_map__fd(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10846) 		void **mmaped = s->maps[i].mmaped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10848) 		if (!mmaped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10849) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10851) 		if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10852) 			*mmaped = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10853) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10854) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10856) 		if (map->def.map_flags & BPF_F_RDONLY_PROG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10857) 			prot = PROT_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10858) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10859) 			prot = PROT_READ | PROT_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10861) 		/* Remap the anonymous mmap()-ed "map initialization image" as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10862) 		 * BPF map-backed mmap()-ed memory, preserving the same memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10863) 		 * address. This causes the kernel to change the process' page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10864) 		 * table to point to a different piece of kernel memory, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10865) 		 * from the userspace point of view the memory address (and its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10866) 		 * contents, which are identical at this point) stays the same.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10867) 		 * This mapping will be released by bpf_object__close() as part
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10868) 		 * of the normal clean up procedure, so we don't need to worry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10869) 		 * about it from the skeleton's clean up perspective.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10870) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10871) 		*mmaped = mmap(map->mmaped, mmap_sz, prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10872) 				MAP_SHARED | MAP_FIXED, map_fd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10873) 		if (*mmaped == MAP_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10874) 			err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10875) 			*mmaped = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10876) 			pr_warn("failed to re-mmap() map '%s': %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10877) 				 bpf_map__name(map), err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10878) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10879) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10880) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10882) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10884) 
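/*
 * Illustrative sketch, not part of upstream libbpf, showing the remapping
 * technique used in bpf_object__load_skeleton() above in isolation: an
 * existing anonymous mapping at 'old_addr' is replaced, at the same address,
 * by a shared mapping of a BPF_F_MMAPABLE map via MAP_FIXED. 'map_fd' and
 * 'size' are caller-provided assumptions; 'size' must be the page-aligned
 * mmap size of the map.
 */
__attribute__((unused))
static void *example_remap_over_image(void *old_addr, size_t size,
				      int map_fd, bool read_only)
{
	int prot = read_only ? PROT_READ : PROT_READ | PROT_WRITE;
	void *new_addr;

	/* MAP_FIXED atomically replaces the old pages at old_addr, so the
	 * userspace pointer value stays valid while the backing memory
	 * becomes the kernel's map storage
	 */
	new_addr = mmap(old_addr, size, prot, MAP_SHARED | MAP_FIXED,
			map_fd, 0);
	if (new_addr == MAP_FAILED)
		return NULL;
	return new_addr;
}
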
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10885) int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10887) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10889) 	for (i = 0; i < s->prog_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10890) 		struct bpf_program *prog = *s->progs[i].prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10891) 		struct bpf_link **link = s->progs[i].link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10892) 		const struct bpf_sec_def *sec_def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10894) 		if (!prog->load)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10895) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10897) 		sec_def = find_sec_def(prog->sec_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10898) 		if (!sec_def || !sec_def->attach_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10899) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10901) 		*link = sec_def->attach_fn(sec_def, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10902) 		if (IS_ERR(*link)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10903) 			pr_warn("failed to auto-attach program '%s': %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10904) 				bpf_program__name(prog), PTR_ERR(*link));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10905) 			return PTR_ERR(*link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10906) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10907) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10909) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10912) void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10914) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10916) 	for (i = 0; i < s->prog_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10917) 		struct bpf_link **link = s->progs[i].link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10919) 		bpf_link__destroy(*link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10920) 		*link = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10924) void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10926) 	if (!s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10927) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10929) 	if (s->progs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10930) 		bpf_object__detach_skeleton(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10931) 	if (s->obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10932) 		bpf_object__close(*s->obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10933) 	free(s->maps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10934) 	free(s->progs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10935) 	free(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10936) }
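
/*
 * Illustrative sketch, not part of upstream libbpf: the open/load/attach/
 * destroy sequence that bpftool-generated skeleton code drives against the
 * helpers above. The skeleton 's' is assumed to have already been populated
 * by generated code (name, embedded object data, maps[] and progs[] arrays)
 * before this runs; on error, bpf_object__destroy_skeleton() tears down the
 * links, the object, and 's' itself.
 */
__attribute__((unused))
static int example_skeleton_lifecycle(struct bpf_object_skeleton *s)
{
	int err;

	err = bpf_object__open_skeleton(s, NULL /* default open opts */);
	if (err)
		goto err_out;

	err = bpf_object__load_skeleton(s);
	if (err)
		goto err_out;

	err = bpf_object__attach_skeleton(s);
	if (err)
		goto err_out;

	return 0;

err_out:
	bpf_object__destroy_skeleton(s);
	return err;
}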