// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>

#include <trace/hooks/syscall_check.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly =
	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU (time-of-check to time-of-use) race between this check
 * and the following copy_from_user() call. However, this is not a concern
 * since this function is meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(void __user *uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	unsigned char __user *addr = uaddr + expected_size;
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	res = check_zeroed_user(addr, actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}
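
/*
 * Example (illustrative sketch, not part of this file): a syscall handler
 * accepting a possibly-extended struct would typically pair the check above
 * with copy_from_user(), e.g. for a hypothetical 'struct some_info' and a
 * user pointer 'uinfo' of length 'info_len':
 *
 *	err = bpf_check_uarg_tail_zero(uinfo, sizeof(struct some_info),
 *				       info_len);
 *	if (err)
 *		return err;
 *	info_len = min_t(u32, sizeof(struct some_info), info_len);
 *	if (copy_from_user(&info, uinfo, info_len))
 *		return -EFAULT;
 */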

const struct bpf_map_ops bpf_map_offload_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	u32 type = attr->map_type;
	struct bpf_map *map;
	int err;

	if (type >= ARRAY_SIZE(bpf_map_types))
		return ERR_PTR(-EINVAL);
	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[type];
	if (!ops)
		return ERR_PTR(-EINVAL);

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = ops;
	map->map_type = type;
	return map;
}

static void bpf_map_write_active_inc(struct bpf_map *map)
{
	atomic64_inc(&map->writecnt);
}

static void bpf_map_write_active_dec(struct bpf_map *map)
{
	atomic64_dec(&map->writecnt);
}

bool bpf_map_write_active(const struct bpf_map *map)
{
	return atomic64_read(&map->writecnt) != 0;
}

static u32 bpf_map_value_size(struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running BPF programs to complete so that
	 * userspace, when we return to it, knows that all programs
	 * that could be running use the new map value.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
				void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_dev_bound(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, f.file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, flags);
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_dev_bound(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock, since value wasn't zero inited */
			check_and_init_map_lock(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY for kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO;
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}
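
/*
 * Example (illustrative sketch): a map implementation would typically size
 * its backing store as a u64 to avoid overflow and pair the allocation with
 * bpf_map_area_free(), e.g. for a hypothetical array of n_entries elements
 * of elem_size bytes:
 *
 *	void *data = bpf_map_area_alloc((u64)n_entries * elem_size,
 *					bpf_map_attr_numa_node(attr));
 *	if (!data)
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	bpf_map_area_free(data);
 */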

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * they have zero meaning for the map itself, let's clear them
	 * here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
}

static int bpf_charge_memlock(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
		atomic_long_sub(pages, &user->locked_vm);
		return -EPERM;
	}
	return 0;
}

static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
{
	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
	struct user_struct *user;
	int ret;

	if (size >= U32_MAX - PAGE_SIZE)
		return -E2BIG;

	user = get_current_user();
	ret = bpf_charge_memlock(user, pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	mem->pages = pages;
	mem->user = user;

	return 0;
}

void bpf_map_charge_finish(struct bpf_map_memory *mem)
{
	bpf_uncharge_memlock(mem->user, mem->pages);
	free_uid(mem->user);
}

void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src)
{
	*dst = *src;

	/* Make sure src will not be used for the redundant uncharging. */
	memset(src, 0, sizeof(struct bpf_map_memory));
}
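
/*
 * Example (illustrative sketch): the usual memlock-charging lifecycle for a
 * map implementation is to charge up front, move the charge into the map on
 * success, and release it on failure or on final free (as
 * bpf_map_free_deferred() below does):
 *
 *	struct bpf_map_memory mem;
 *
 *	err = bpf_map_charge_init(&mem, total_bytes); // charge current user
 *	if (err)
 *		return ERR_PTR(err);
 *	...allocate...
 *	bpf_map_charge_move(&map->memory, &mem);      // map now owns charge
 *	// on error paths instead: bpf_map_charge_finish(&mem);
 */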

int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
{
	int ret;

	ret = bpf_charge_memlock(map->memory.user, pages);
	if (ret)
		return ret;
	map->memory.pages += pages;
	return ret;
}

void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
{
	bpf_uncharge_memlock(map->memory.user, pages);
	map->memory.pages -= pages;
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);
	struct bpf_map_memory mem;

	bpf_map_charge_move(&mem, &map->memory);
	security_bpf_map_free(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
	bpf_map_charge_finish(&mem);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		btf_put(map->btf);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = f.file->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions on the syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 type = 0, jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		spin_lock(&array->aux->owner.lock);
		type = array->aux->owner.type;
		jited = array->aux->owner.jited;
		spin_unlock(&array->aux->owner.lock);
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->memory.pages * 1ULL << PAGE_SHIFT,
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
}

/* called for all unmapped memory regions (including initial) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_dec(map);
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open = bpf_map_mmap_open,
	.close = bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || map_value_has_spin_lock(map))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it's possible to leak a writable page
		 * reference which would let user-space keep modifying the
		 * map after freezing, while the verifier assumes the
		 * contents do not change
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vma->vm_flags &= ~VM_MAYEXEC;
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vma->vm_flags &= ~VM_MAYWRITE;

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
	.mmap		= bpf_map_mmap,
	.poll		= bpf_map_poll,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}
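
/*
 * Example (illustrative): the BPF_F_* access flags map onto the usual
 * open(2) access modes, so BPF_F_RDONLY in map_flags yields an O_RDONLY
 * anon inode fd, BPF_F_WRONLY yields O_WRONLY, neither yields O_RDWR, and
 * passing both is rejected with -EINVAL.
 */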

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
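
/*
 * Example (illustrative): with BPF_MAP_CREATE_LAST_FIELD defined as
 * btf_vmlinux_value_type_id (see map_create() below),
 * CHECK_ATTR(BPF_MAP_CREATE) expands to a memchr_inv() over every byte of
 * *attr that lies after attr->btf_vmlinux_value_type_id, evaluating to true
 * (i.e. reject with -EINVAL) if any of those trailing bytes is non-zero.
 */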

/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}
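
/*
 * Example (illustrative): when map_create() below copies the user-supplied
 * attr->map_name through this helper, a name like "my_map.v2" is accepted
 * and its length returned, while names containing other characters such as
 * '-' or spaces are rejected, as is a name that fills the whole buffer
 * without a terminating '\0'.
 */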

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, const struct btf *btf,
			 u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->spin_lock_off = btf_find_spin_lock(btf, value_type);

	if (map_value_has_spin_lock(map)) {
		if (map->map_flags & BPF_F_RDONLY_PROG)
			return -EACCES;
		if (map->map_type != BPF_MAP_TYPE_HASH &&
		    map->map_type != BPF_MAP_TYPE_ARRAY &&
		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
			return -ENOTSUPP;
		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
		    map->value_size) {
			WARN_ONCE(1,
				  "verifier bug spin_lock_off %d value_size %d\n",
				  map->spin_lock_off, map->value_size);
			return -EFAULT;
		}
	}

	if (map->ops->map_check_btf)
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
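/*
 * BPF_MAP_CREATE: allocate a map of the requested type, copy in its
 * name, attach optional BTF type information, publish the map in the
 * map IDR and hand userspace an anonymous-inode fd that acts as the
 * sole handle to it.
 */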
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) #define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) /* called via syscall */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) static int map_create(union bpf_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) int numa_node = bpf_map_attr_numa_node(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) struct bpf_map_memory mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) int f_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) err = CHECK_ATTR(BPF_MAP_CREATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (attr->btf_vmlinux_value_type_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) attr->btf_key_type_id || attr->btf_value_type_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) } else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) f_flags = bpf_get_file_flag(attr->map_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (f_flags < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return f_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (numa_node != NUMA_NO_NODE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) ((unsigned int)numa_node >= nr_node_ids ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) !node_online(numa_node)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) /* find map type and init map: hashtable vs rbtree vs bloom vs ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) map = find_and_alloc_map(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (IS_ERR(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return PTR_ERR(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) err = bpf_obj_name_cpy(map->name, attr->map_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) sizeof(attr->map_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) goto free_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) atomic64_set(&map->refcnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) atomic64_set(&map->usercnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) mutex_init(&map->freeze_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) map->spin_lock_off = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (attr->btf_key_type_id || attr->btf_value_type_id ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 	    /* Even if the map's value is a kernel struct, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 	     * bpf_prog.o must have BTF to begin with in order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 	     * to figure out the corresponding kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 	     * counterpart. Thus, attr->btf_fd has to be valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 	     * as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 	     */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) attr->btf_vmlinux_value_type_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) struct btf *btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) btf = btf_get_by_fd(attr->btf_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (IS_ERR(btf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) err = PTR_ERR(btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) goto free_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) map->btf = btf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (attr->btf_value_type_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) err = map_check_btf(map, btf, attr->btf_key_type_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) attr->btf_value_type_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) goto free_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) map->btf_key_type_id = attr->btf_key_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) map->btf_value_type_id = attr->btf_value_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) map->btf_vmlinux_value_type_id =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) attr->btf_vmlinux_value_type_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) err = security_bpf_map_alloc(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) goto free_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) err = bpf_map_alloc_id(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) goto free_map_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) err = bpf_map_new_fd(map, f_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 		/* Failed to allocate an fd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) 		 * bpf_map_put_with_uref() is needed because the above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) 		 * bpf_map_alloc_id() has published the map to userspace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) 		 * and userspace may already have taken a reference on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) 		 * it through BPF_MAP_GET_FD_BY_ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) bpf_map_put_with_uref(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) free_map_sec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) security_bpf_map_free(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) free_map:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) btf_put(map->btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) bpf_map_charge_move(&mem, &map->memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) map->ops->map_free(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) bpf_map_charge_finish(&mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /* If an error is returned, the fd is released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)  * On success the caller must complete the fd access with a matching fdput().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct bpf_map *__bpf_map_get(struct fd f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (!f.file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) return ERR_PTR(-EBADF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (f.file->f_op != &bpf_map_fops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return f.file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
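/*
 * A map carries two reference counts: refcnt keeps the map object
 * itself alive, while usercnt tracks references that userspace can
 * still reach it through (fds, pinned paths). When usercnt drops to
 * zero, map_release_uref() lets map types such as prog arrays drop
 * state that only makes sense while userspace can update the map.
 */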
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) void bpf_map_inc(struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) atomic64_inc(&map->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) EXPORT_SYMBOL_GPL(bpf_map_inc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) void bpf_map_inc_with_uref(struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) atomic64_inc(&map->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) atomic64_inc(&map->usercnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct bpf_map *bpf_map_get(u32 ufd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct fd f = fdget(ufd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) map = __bpf_map_get(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (IS_ERR(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) bpf_map_inc(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) struct bpf_map *bpf_map_get_with_uref(u32 ufd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) struct fd f = fdget(ufd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) map = __bpf_map_get(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (IS_ERR(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) return map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) bpf_map_inc_with_uref(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) /* map_idr_lock must be held by the caller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) int refold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (!refold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (uref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) atomic64_inc(&map->usercnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) spin_lock_bh(&map_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) map = __bpf_map_inc_not_zero(map, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) spin_unlock_bh(&map_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) return map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
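/*
 * Copy a map key in from userspace. Queue and stack maps have
 * key_size == 0 and take no key at all: for them a non-NULL user
 * pointer is rejected and NULL is handed back instead of an
 * allocation.
 */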
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) static void *__bpf_copy_key(void __user *ukey, u64 key_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (key_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) return memdup_user(ukey, key_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (ukey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) /* last field in 'union bpf_attr' used by this command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
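/*
 * BPF_MAP_LOOKUP_ELEM: copy the key in, look the value up, copy it
 * back out. BPF_F_LOCK lets the copy be made under the value's
 * struct bpf_spin_lock so a concurrent update cannot be observed
 * half-done. A minimal illustrative invocation from userspace:
 *
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *	};
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 */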
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) static int map_lookup_elem(union bpf_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) void __user *ukey = u64_to_user_ptr(attr->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) void __user *uvalue = u64_to_user_ptr(attr->value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) int ufd = attr->map_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) void *key, *value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) u32 value_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) struct fd f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (attr->flags & ~BPF_F_LOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) f = fdget(ufd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) map = __bpf_map_get(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) if (IS_ERR(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return PTR_ERR(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if ((attr->flags & BPF_F_LOCK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) !map_value_has_spin_lock(map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) key = __bpf_copy_key(ukey, map->key_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (IS_ERR(key)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) err = PTR_ERR(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) value_size = bpf_map_value_size(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (!value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) goto free_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) err = bpf_map_copy_value(map, key, value, attr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) goto free_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (copy_to_user(uvalue, value, value_size) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) goto free_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) free_value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) kfree(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) free_key:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) kfree(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) err_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
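/*
 * BPF_MAP_UPDATE_ELEM. For the per-cpu map types handled below, the
 * syscall-level value is an array with one slot per possible CPU and
 * each slot rounded up to 8 bytes, hence the
 * round_up(map->value_size, 8) * num_possible_cpus() sizing.
 */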
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) static int map_update_elem(union bpf_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) void __user *ukey = u64_to_user_ptr(attr->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) void __user *uvalue = u64_to_user_ptr(attr->value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) int ufd = attr->map_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) void *key, *value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) u32 value_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) struct fd f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) f = fdget(ufd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) map = __bpf_map_get(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (IS_ERR(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return PTR_ERR(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) bpf_map_write_active_inc(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if ((attr->flags & BPF_F_LOCK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) !map_value_has_spin_lock(map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) key = __bpf_copy_key(ukey, map->key_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (IS_ERR(key)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) err = PTR_ERR(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) value_size = round_up(map->value_size, 8) * num_possible_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) value_size = map->value_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (!value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) goto free_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (copy_from_user(value, uvalue, value_size) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) goto free_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) err = bpf_map_update_value(map, f, key, value, attr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) free_value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) kfree(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) free_key:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) kfree(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) err_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) bpf_map_write_active_dec(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
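/*
 * BPF_MAP_DELETE_ELEM. Device-bound maps delete on the offload device;
 * fd-based prog arrays and struct_ops maps need a sleepable context;
 * all other map types are deleted under rcu_read_lock() with
 * instrumentation disabled so that tracing programs cannot recurse
 * into the map implementation on this CPU.
 */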
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static int map_delete_elem(union bpf_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) void __user *ukey = u64_to_user_ptr(attr->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) int ufd = attr->map_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct fd f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) void *key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) f = fdget(ufd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) map = __bpf_map_get(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (IS_ERR(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return PTR_ERR(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) bpf_map_write_active_inc(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) key = __bpf_copy_key(ukey, map->key_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (IS_ERR(key)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) err = PTR_ERR(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (bpf_map_is_dev_bound(map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) err = bpf_map_offload_delete_elem(map, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) } else if (IS_FD_PROG_ARRAY(map) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		/* These map types require a sleepable context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) err = map->ops->map_delete_elem(map, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) bpf_disable_instrumentation();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) err = map->ops->map_delete_elem(map, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) bpf_enable_instrumentation();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) maybe_wait_bpf_programs(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) kfree(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) err_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) bpf_map_write_active_dec(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) /* last field in 'union bpf_attr' used by this command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
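/*
 * BPF_MAP_GET_NEXT_KEY is the map-iteration primitive: with
 * attr->key == 0 the first key is returned, and feeding each returned
 * key back in yields the following one until -ENOENT marks the end.
 * For hash maps, a key that was deleted in the meantime may restart
 * the walk, so keys can be seen more than once under concurrent
 * updates.
 */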
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) static int map_get_next_key(union bpf_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) void __user *ukey = u64_to_user_ptr(attr->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) void __user *unext_key = u64_to_user_ptr(attr->next_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) int ufd = attr->map_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) void *key, *next_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) struct fd f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) f = fdget(ufd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) map = __bpf_map_get(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (IS_ERR(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) return PTR_ERR(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (ukey) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) key = __bpf_copy_key(ukey, map->key_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (IS_ERR(key)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) err = PTR_ERR(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) key = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) next_key = kmalloc(map->key_size, GFP_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (!next_key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) goto free_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (bpf_map_is_dev_bound(map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) err = bpf_map_offload_get_next_key(map, key, next_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) err = map->ops->map_get_next_key(map, key, next_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) goto free_next_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) if (copy_to_user(unext_key, next_key, map->key_size) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) goto free_next_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) free_next_key:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) kfree(next_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) free_key:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) kfree(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) err_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
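/*
 * Generic implementation of BPF_MAP_DELETE_BATCH: delete up to
 * batch.count keys supplied in one userspace array, stopping at the
 * first failure. batch.count is written back with the number of
 * elements actually processed.
 */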
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) int generic_map_delete_batch(struct bpf_map *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) const union bpf_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) union bpf_attr __user *uattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) void __user *keys = u64_to_user_ptr(attr->batch.keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) u32 cp, max_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) void *key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (attr->batch.elem_flags & ~BPF_F_LOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if ((attr->batch.elem_flags & BPF_F_LOCK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) !map_value_has_spin_lock(map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) max_count = attr->batch.count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) if (!max_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (!key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) for (cp = 0; cp < max_count; cp++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (copy_from_user(key, keys + cp * map->key_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) map->key_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (bpf_map_is_dev_bound(map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) err = bpf_map_offload_delete_elem(map, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) bpf_disable_instrumentation();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) err = map->ops->map_delete_elem(map, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) bpf_enable_instrumentation();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) maybe_wait_bpf_programs(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) kfree(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
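/*
 * Generic implementation of BPF_MAP_UPDATE_BATCH: behaves like a loop
 * of BPF_MAP_UPDATE_ELEM over parallel key and value arrays, stopping
 * at the first failing element and reporting the number of successful
 * updates back in batch.count.
 */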
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) int generic_map_update_batch(struct bpf_map *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) const union bpf_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) union bpf_attr __user *uattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) void __user *values = u64_to_user_ptr(attr->batch.values);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) void __user *keys = u64_to_user_ptr(attr->batch.keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) u32 value_size, cp, max_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) int ufd = attr->batch.map_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) void *key, *value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) struct fd f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (attr->batch.elem_flags & ~BPF_F_LOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if ((attr->batch.elem_flags & BPF_F_LOCK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) !map_value_has_spin_lock(map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) value_size = bpf_map_value_size(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) max_count = attr->batch.count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) if (!max_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (!key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (!value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) kfree(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) for (cp = 0; cp < max_count; cp++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (copy_from_user(key, keys + cp * map->key_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) map->key_size) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) copy_from_user(value, values + cp * value_size, value_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) err = bpf_map_update_value(map, f, key, value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) attr->batch.elem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) kfree(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) kfree(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) #define MAP_LOOKUP_RETRIES 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
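/*
 * Generic implementation of BPF_MAP_LOOKUP_BATCH: walk the map with
 * map_get_next_key(), copying up to batch.count key/value pairs out to
 * userspace. batch.in_batch resumes a previous walk and batch.out_batch
 * returns the position to continue from. Since lookups race with
 * deletions, a key that vanishes between get_next_key and copy_value
 * is retried up to MAP_LOOKUP_RETRIES times before giving up with
 * -EINTR.
 */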
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) int generic_map_lookup_batch(struct bpf_map *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) const union bpf_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) union bpf_attr __user *uattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) void __user *values = u64_to_user_ptr(attr->batch.values);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) void __user *keys = u64_to_user_ptr(attr->batch.keys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) void *buf, *buf_prevkey, *prev_key, *key, *value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) int err, retry = MAP_LOOKUP_RETRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) u32 value_size, cp, max_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) if (attr->batch.elem_flags & ~BPF_F_LOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if ((attr->batch.elem_flags & BPF_F_LOCK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) !map_value_has_spin_lock(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) value_size = bpf_map_value_size(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) max_count = attr->batch.count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) if (!max_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (put_user(0, &uattr->batch.count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (!buf_prevkey)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) kfree(buf_prevkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) prev_key = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) goto free_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) key = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) value = key + map->key_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (ubatch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) prev_key = buf_prevkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) for (cp = 0; cp < max_count;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) err = map->ops->map_get_next_key(map, prev_key, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) err = bpf_map_copy_value(map, key, value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) attr->batch.elem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (err == -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) if (retry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) retry--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) err = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) goto free_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) if (copy_to_user(keys + cp * map->key_size, key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) map->key_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) goto free_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) if (copy_to_user(values + cp * value_size, value, value_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) goto free_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (!prev_key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) prev_key = buf_prevkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) swap(prev_key, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) retry = MAP_LOOKUP_RETRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) cp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) if (err == -EFAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) goto free_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) (cp && copy_to_user(uobatch, prev_key, map->key_size))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) free_buf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) kfree(buf_prevkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
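/*
 * BPF_MAP_LOOKUP_AND_DELETE_ELEM. In this implementation only queue
 * and stack maps support the command, where it acts as a pop: the
 * head/top element is removed and its value copied back to userspace.
 */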
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static int map_lookup_and_delete_elem(union bpf_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) void __user *ukey = u64_to_user_ptr(attr->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) void __user *uvalue = u64_to_user_ptr(attr->value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) int ufd = attr->map_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) void *key, *value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) u32 value_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) struct fd f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) f = fdget(ufd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) map = __bpf_map_get(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (IS_ERR(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) return PTR_ERR(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) bpf_map_write_active_inc(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) key = __bpf_copy_key(ukey, map->key_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (IS_ERR(key)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) err = PTR_ERR(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) value_size = map->value_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) if (!value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) goto free_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) if (map->map_type == BPF_MAP_TYPE_QUEUE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) map->map_type == BPF_MAP_TYPE_STACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) err = map->ops->map_pop_elem(map, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) err = -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) goto free_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (copy_to_user(uvalue, value, value_size) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) goto free_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) free_value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) kfree(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) free_key:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) kfree(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) err_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) bpf_map_write_active_dec(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) #define BPF_MAP_FREEZE_LAST_FIELD map_fd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
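/*
 * BPF_MAP_FREEZE makes a map read-only from the syscall side while
 * leaving program-side access untouched; map_get_sys_perms() strips
 * FMODE_CAN_WRITE from frozen maps. This is how read-only data
 * sections are sealed after loading, e.g. (illustrative):
 *
 *	union bpf_attr attr = { .map_fd = map_fd };
 *	err = syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr));
 */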
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) static int map_freeze(const union bpf_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) int err = 0, ufd = attr->map_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) struct fd f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (CHECK_ATTR(BPF_MAP_FREEZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) f = fdget(ufd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) map = __bpf_map_get(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (IS_ERR(map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) return PTR_ERR(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) mutex_lock(&map->freeze_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (bpf_map_write_active(map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (READ_ONCE(map->frozen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (!bpf_capable()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) goto err_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) WRITE_ONCE(map->frozen, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) err_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) mutex_unlock(&map->freeze_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) static const struct bpf_prog_ops * const bpf_prog_types[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) [_id] = & _name ## _prog_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) #define BPF_MAP_TYPE(_id, _ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) #define BPF_LINK_TYPE(_id, _name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) #include <linux/bpf_types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) #undef BPF_PROG_TYPE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) #undef BPF_MAP_TYPE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) #undef BPF_LINK_TYPE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
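/*
 * Resolve prog->aux->ops from the bpf_prog_types[] table above. The
 * type index comes straight from userspace, so it is clamped with
 * array_index_nospec() to prevent the bounds check from being bypassed
 * speculatively (Spectre v1).
 */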
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) const struct bpf_prog_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if (type >= ARRAY_SIZE(bpf_prog_types))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) ops = bpf_prog_types[type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (!ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) if (!bpf_prog_is_dev_bound(prog->aux))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) prog->aux->ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) prog->aux->ops = &bpf_offload_prog_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) prog->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) enum bpf_audit {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) BPF_AUDIT_LOAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) BPF_AUDIT_UNLOAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) BPF_AUDIT_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) [BPF_AUDIT_LOAD] = "LOAD",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) [BPF_AUDIT_UNLOAD] = "UNLOAD",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) struct audit_context *ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) struct audit_buffer *ab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) if (audit_enabled == AUDIT_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) if (op == BPF_AUDIT_LOAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) ctx = audit_context();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (unlikely(!ab))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) audit_log_format(ab, "prog-id=%u op=%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) prog->aux->id, bpf_audit_str[op]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) audit_log_end(ab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
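/*
 * Program memory is charged in pages against the owning user's
 * RLIMIT_MEMLOCK. The add-then-check pattern can only err on the
 * strict side under concurrency: two racing charges may both fail,
 * but they can never both slip past the limit.
 */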
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) int __bpf_prog_charge(struct user_struct *user, u32 pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) unsigned long user_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (user) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) user_bufs = atomic_long_add_return(pages, &user->locked_vm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) if (user_bufs > memlock_limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) atomic_long_sub(pages, &user->locked_vm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) if (user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) atomic_long_sub(pages, &user->locked_vm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) static int bpf_prog_charge_memlock(struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) struct user_struct *user = get_current_user();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) ret = __bpf_prog_charge(user, prog->pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) free_uid(user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) prog->aux->user = user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) struct user_struct *user = prog->aux->user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) __bpf_prog_uncharge(user, prog->pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) free_uid(user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
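/* Allocate the user-visible program ID. idr_preload(GFP_KERNEL) pre-fills
 * the IDR node cache so the GFP_ATOMIC allocation under the BH-disabled
 * spinlock is unlikely to fail; idr_alloc_cyclic() hands out IDs from
 * [1, INT_MAX) and avoids immediately reusing a just-freed ID.
 */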
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) static int bpf_prog_alloc_id(struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) idr_preload(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) spin_lock_bh(&prog_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (id > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) prog->aux->id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) spin_unlock_bh(&prog_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) idr_preload_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) /* id is in [1, INT_MAX) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (WARN_ON_ONCE(!id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return id > 0 ? 0 : id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) /* cBPF to eBPF migrations are currently not in the idr store.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) * Offloaded programs are removed from the store when their device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) * disappears - even if someone grabs an fd to them they are unusable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * simply waiting for refcnt to drop to be freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (!prog->aux->id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
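	/* Callers already holding prog_idr_lock pass do_idr_lock == false;
	 * __acquire()/__release() merely balance the lock context for sparse
	 * and generate no code.
	 */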
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) if (do_idr_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) spin_lock_bh(&prog_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) __acquire(&prog_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) idr_remove(&prog_idr, prog->aux->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) prog->aux->id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) if (do_idr_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) spin_unlock_bh(&prog_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) __release(&prog_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) static void __bpf_prog_put_rcu(struct rcu_head *rcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) kvfree(aux->func_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) kfree(aux->func_info_aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) bpf_prog_uncharge_memlock(aux->prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) security_bpf_prog_free(aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) bpf_prog_free(aux->prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) bpf_prog_kallsyms_del_all(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) btf_put(prog->aux->btf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) bpf_prog_free_linfo(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) if (deferred) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (prog->aux->sleepable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) __bpf_prog_put_rcu(&prog->aux->rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (atomic64_dec_and_test(&prog->aux->refcnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) /* bpf_prog_free_id() must be called first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) bpf_prog_free_id(prog, do_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) __bpf_prog_put_noref(prog, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) void bpf_prog_put(struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) __bpf_prog_put(prog, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) EXPORT_SYMBOL_GPL(bpf_prog_put);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) static int bpf_prog_release(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) struct bpf_prog *prog = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) bpf_prog_put(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
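/* Sum the per-CPU run-time statistics. The u64_stats seqcount retry loop
 * yields a consistent {nsecs, cnt} pair per CPU even on 32-bit kernels,
 * where the two 64-bit loads are not atomic.
 */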
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) static void bpf_prog_get_stats(const struct bpf_prog *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) struct bpf_prog_stats *stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) u64 nsecs = 0, cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) const struct bpf_prog_stats *st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) unsigned int start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) u64 tnsecs, tcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) st = per_cpu_ptr(prog->aux->stats, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) start = u64_stats_fetch_begin_irq(&st->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) tnsecs = st->nsecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) tcnt = st->cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) } while (u64_stats_fetch_retry_irq(&st->syncp, start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) nsecs += tnsecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) cnt += tcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) stats->nsecs = nsecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) stats->cnt = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) const struct bpf_prog *prog = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) struct bpf_prog_stats stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) bpf_prog_get_stats(prog, &stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) seq_printf(m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) "prog_type:\t%u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) "prog_jited:\t%u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) "prog_tag:\t%s\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) "memlock:\t%llu\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) "prog_id:\t%u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) "run_time_ns:\t%llu\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) "run_cnt:\t%llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) prog->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) prog->jited,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) prog_tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) prog->pages * 1ULL << PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) prog->aux->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) stats.nsecs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) stats.cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) #endif
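/* Illustrative /proc/<pid>/fdinfo/<fd> output produced above (the values
 * are made up for the example):
 *
 *	prog_type:	1
 *	prog_jited:	1
 *	prog_tag:	bcf7977d3b93787c
 *	memlock:	4096
 *	prog_id:	200
 *	run_time_ns:	0
 *	run_cnt:	0
 */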
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) const struct file_operations bpf_prog_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) .show_fdinfo = bpf_prog_show_fdinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) .release = bpf_prog_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) .read = bpf_dummy_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) .write = bpf_dummy_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) int bpf_prog_new_fd(struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) ret = security_bpf_prog(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) O_RDWR | O_CLOEXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) static struct bpf_prog *____bpf_prog_get(struct fd f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) if (!f.file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) return ERR_PTR(-EBADF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (f.file->f_op != &bpf_prog_fops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) return f.file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) void bpf_prog_add(struct bpf_prog *prog, int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) atomic64_add(i, &prog->aux->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) EXPORT_SYMBOL_GPL(bpf_prog_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) void bpf_prog_sub(struct bpf_prog *prog, int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) /* Only to be used for undoing previous bpf_prog_add() in some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) * error path. We still know that another entity in our call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	 * path holds a reference to the program, thus atomic64_sub() can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) * be safely used in such cases!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) EXPORT_SYMBOL_GPL(bpf_prog_sub);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) void bpf_prog_inc(struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) atomic64_inc(&prog->aux->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) EXPORT_SYMBOL_GPL(bpf_prog_inc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) /* prog_idr_lock should have been held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) int refold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (!refold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) return prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
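/* Illustrative id -> prog lookup relying on the above; this is the pattern
 * the BPF_PROG_GET_FD_BY_ID path uses under prog_idr_lock:
 *
 *	spin_lock_bh(&prog_idr_lock);
 *	prog = idr_find(&prog_idr, id);
 *	if (prog)
 *		prog = bpf_prog_inc_not_zero(prog);
 *	else
 *		prog = ERR_PTR(-ENOENT);
 *	spin_unlock_bh(&prog_idr_lock);
 */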
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) bool bpf_prog_get_ok(struct bpf_prog *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) enum bpf_prog_type *attach_type, bool attach_drv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) /* not an attachment, just a refcount inc, always allow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) if (!attach_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) if (prog->type != *attach_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) bool attach_drv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) struct fd f = fdget(ufd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) prog = ____bpf_prog_get(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) if (IS_ERR(prog))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) return prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) prog = ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) bpf_prog_inc(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) return prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) struct bpf_prog *bpf_prog_get(u32 ufd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) return __bpf_prog_get(ufd, NULL, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) bool attach_drv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) return __bpf_prog_get(ufd, &type, attach_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
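/* Typical driver-facing usage, sketched (variable names illustrative; this
 * is roughly how an XDP attach path resolves a user-supplied prog fd):
 *
 *	prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, offload_requested);
 *	if (IS_ERR(prog))
 *		return PTR_ERR(prog);
 *	...
 *	bpf_prog_put(prog);	(once the reference is no longer needed)
 */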
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) /* Initially all BPF programs could be loaded w/o specifying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) * expected_attach_type. Later for some of them specifying expected_attach_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)  * at load time became required so that the program could be validated properly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)  * Programs of types that are allowed to be loaded both w/ and w/o (for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)  * backward compatibility) expected_attach_type should have the default attach
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) * type assigned to expected_attach_type for the latter case, so that it can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) * validated later at attach time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) * prog type requires it but has some attach types that have to be backward
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) * compatible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) switch (attr->prog_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) case BPF_PROG_TYPE_CGROUP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) * exist so checking for non-zero is the way to go here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) if (!attr->expected_attach_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) attr->expected_attach_type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) BPF_CGROUP_INET_SOCK_CREATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) enum bpf_attach_type expected_attach_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) u32 btf_id, u32 prog_fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) if (btf_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) if (btf_id > BTF_MAX_TYPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) switch (prog_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) case BPF_PROG_TYPE_TRACING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) case BPF_PROG_TYPE_LSM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) case BPF_PROG_TYPE_STRUCT_OPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) case BPF_PROG_TYPE_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (prog_fd && prog_type != BPF_PROG_TYPE_TRACING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) prog_type != BPF_PROG_TYPE_EXT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) switch (prog_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) case BPF_PROG_TYPE_CGROUP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) switch (expected_attach_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) case BPF_CGROUP_INET_SOCK_CREATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) case BPF_CGROUP_INET_SOCK_RELEASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) case BPF_CGROUP_INET4_POST_BIND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) case BPF_CGROUP_INET6_POST_BIND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) switch (expected_attach_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) case BPF_CGROUP_INET4_BIND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) case BPF_CGROUP_INET6_BIND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) case BPF_CGROUP_INET4_CONNECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) case BPF_CGROUP_INET6_CONNECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) case BPF_CGROUP_INET4_GETPEERNAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) case BPF_CGROUP_INET6_GETPEERNAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) case BPF_CGROUP_INET4_GETSOCKNAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) case BPF_CGROUP_INET6_GETSOCKNAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) case BPF_CGROUP_UDP4_SENDMSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) case BPF_CGROUP_UDP6_SENDMSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) case BPF_CGROUP_UDP4_RECVMSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) case BPF_CGROUP_UDP6_RECVMSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) case BPF_PROG_TYPE_CGROUP_SKB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) switch (expected_attach_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) case BPF_CGROUP_INET_INGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) case BPF_CGROUP_INET_EGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) case BPF_PROG_TYPE_CGROUP_SOCKOPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) switch (expected_attach_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) case BPF_CGROUP_SETSOCKOPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) case BPF_CGROUP_GETSOCKOPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) case BPF_PROG_TYPE_SK_LOOKUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) if (expected_attach_type == BPF_SK_LOOKUP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) case BPF_PROG_TYPE_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) if (expected_attach_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) switch (prog_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) case BPF_PROG_TYPE_SCHED_CLS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) case BPF_PROG_TYPE_SCHED_ACT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) case BPF_PROG_TYPE_XDP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) case BPF_PROG_TYPE_LWT_IN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) case BPF_PROG_TYPE_LWT_OUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) case BPF_PROG_TYPE_LWT_XMIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) case BPF_PROG_TYPE_LWT_SEG6LOCAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) case BPF_PROG_TYPE_SK_SKB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) case BPF_PROG_TYPE_SK_MSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) case BPF_PROG_TYPE_LIRC_MODE2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) case BPF_PROG_TYPE_FLOW_DISSECTOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) case BPF_PROG_TYPE_CGROUP_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) case BPF_PROG_TYPE_CGROUP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) case BPF_PROG_TYPE_CGROUP_SOCKOPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) case BPF_PROG_TYPE_CGROUP_SYSCTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) case BPF_PROG_TYPE_SOCK_OPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) case BPF_PROG_TYPE_EXT: /* extends any prog */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) case BPF_PROG_TYPE_CGROUP_SKB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) /* always unpriv */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) case BPF_PROG_TYPE_SK_REUSEPORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) /* equivalent to SOCKET_FILTER. need CAP_BPF only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) switch (prog_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) case BPF_PROG_TYPE_KPROBE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) case BPF_PROG_TYPE_TRACEPOINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) case BPF_PROG_TYPE_PERF_EVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) case BPF_PROG_TYPE_RAW_TRACEPOINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) case BPF_PROG_TYPE_TRACING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) case BPF_PROG_TYPE_LSM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) case BPF_PROG_TYPE_EXT: /* extends any prog */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) /* last field in 'union bpf_attr' used by this command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) #define BPF_PROG_LOAD_LAST_FIELD attach_prog_fd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) enum bpf_prog_type type = attr->prog_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) char license[128];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) bool is_gpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) if (CHECK_ATTR(BPF_PROG_LOAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) BPF_F_ANY_ALIGNMENT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) BPF_F_TEST_STATE_FREQ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) BPF_F_SLEEPABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) BPF_F_TEST_RND_HI32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) !bpf_capable())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) /* copy eBPF program license from user space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) sizeof(license) - 1) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) license[sizeof(license) - 1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) /* eBPF programs must be GPL compatible to use GPL-ed functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) is_gpl = license_is_gpl_compatible(license);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) if (attr->insn_cnt == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) type != BPF_PROG_TYPE_CGROUP_SKB &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) !bpf_capable())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) if (is_perfmon_prog_type(type) && !perfmon_capable())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) bpf_prog_load_fixup_attach_type(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) attr->attach_btf_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) attr->attach_prog_fd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) /* plain bpf_prog allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) if (!prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) prog->expected_attach_type = attr->expected_attach_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) prog->aux->attach_btf_id = attr->attach_btf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) if (attr->attach_prog_fd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) struct bpf_prog *dst_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) dst_prog = bpf_prog_get(attr->attach_prog_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) if (IS_ERR(dst_prog)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) err = PTR_ERR(dst_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) goto free_prog_nouncharge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) prog->aux->dst_prog = dst_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) prog->aux->offload_requested = !!attr->prog_ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) err = security_bpf_prog_alloc(prog->aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) goto free_prog_nouncharge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) err = bpf_prog_charge_memlock(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) goto free_prog_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) prog->len = attr->insn_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) bpf_prog_insn_size(prog)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) goto free_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) prog->orig_prog = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) prog->jited = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) atomic64_set(&prog->aux->refcnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) prog->gpl_compatible = is_gpl ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) if (bpf_prog_is_dev_bound(prog->aux)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) err = bpf_prog_offload_init(prog, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) goto free_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) /* find program type: socket_filter vs tracing_filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) err = find_prog_type(type, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) goto free_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) prog->aux->load_time = ktime_get_boottime_ns();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) sizeof(attr->prog_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) goto free_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) /* run eBPF verifier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) err = bpf_check(&prog, attr, uattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) goto free_used_maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) prog = bpf_prog_select_runtime(prog, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) goto free_used_maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) err = bpf_prog_alloc_id(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) goto free_used_maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) /* Upon success of bpf_prog_alloc_id(), the BPF prog is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) * effectively publicly exposed. However, retrieving via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) * bpf_prog_get_fd_by_id() will take another reference,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	 * therefore it cannot go away underneath us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) * Only for the time /after/ successful bpf_prog_new_fd()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) * and before returning to userspace, we might just hold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) * one reference and any parallel close on that fd could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) * rip everything out. Hence, below notifications must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) * happen before bpf_prog_new_fd().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) * Also, any failure handling from this point onwards must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) * be using bpf_prog_put() given the program is exposed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) bpf_prog_kallsyms_add(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) bpf_audit_prog(prog, BPF_AUDIT_LOAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) err = bpf_prog_new_fd(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) bpf_prog_put(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) free_used_maps:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) /* In case we have subprogs, we need to wait for a grace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) * period before we can tear down JIT memory since symbols
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) * are already exposed under kallsyms.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) __bpf_prog_put_noref(prog, prog->aux->func_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) free_prog:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) bpf_prog_uncharge_memlock(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) free_prog_sec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) security_bpf_prog_free(prog->aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) free_prog_nouncharge:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) bpf_prog_free(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) }
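/* Minimal user-space sketch of this command (illustrative, error handling
 * omitted); only the attr fields consumed above are set:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns     = (__u64)(unsigned long)insns;
 *	attr.insn_cnt  = insn_cnt;
 *	attr.license   = (__u64)(unsigned long)"GPL";
 *	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */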
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) #define BPF_OBJ_LAST_FIELD file_flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) static int bpf_obj_pin(const union bpf_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) static int bpf_obj_get(const union bpf_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) attr->file_flags & ~BPF_OBJ_FLAG_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) attr->file_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) const struct bpf_link_ops *ops, struct bpf_prog *prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) atomic64_set(&link->refcnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) link->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) link->id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) link->ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) link->prog = prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) }
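/* A link implementation embeds struct bpf_link and provides at least the
 * release() and dealloc() callbacks consumed by bpf_link_free() below.
 * Hypothetical sketch (the foo_* names are not real kernel symbols):
 *
 *	struct foo_link {
 *		struct bpf_link link;
 *	};
 *
 *	static const struct bpf_link_ops foo_link_ops = {
 *		.release = foo_link_release,
 *		.dealloc = foo_link_dealloc,
 *	};
 *
 *	bpf_link_init(&foo->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
 *		      &foo_link_ops, prog);
 */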
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) static void bpf_link_free_id(int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) if (!id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) spin_lock_bh(&link_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) idr_remove(&link_idr, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) spin_unlock_bh(&link_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) /* Clean up bpf_link and corresponding anon_inode file and FD. After
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)  * anon_inode's release() call. This helper marks bpf_link as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)  * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)  * is not decremented, it's the responsibility of the calling code that failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) * to complete bpf_link initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) void bpf_link_cleanup(struct bpf_link_primer *primer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) primer->link->prog = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) bpf_link_free_id(primer->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) fput(primer->file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) put_unused_fd(primer->fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) void bpf_link_inc(struct bpf_link *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) atomic64_inc(&link->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) /* bpf_link_free is guaranteed to be called from process context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) static void bpf_link_free(struct bpf_link *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) bpf_link_free_id(link->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) if (link->prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) /* detach BPF program, clean up used resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) link->ops->release(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) bpf_prog_put(link->prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) /* free bpf_link and its containing memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) link->ops->dealloc(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) static void bpf_link_put_deferred(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) struct bpf_link *link = container_of(work, struct bpf_link, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) bpf_link_free(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) /* bpf_link_put can be called from atomic context, but ensures that resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) * are freed from process context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) void bpf_link_put(struct bpf_link *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) if (!atomic64_dec_and_test(&link->refcnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) if (in_atomic()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) INIT_WORK(&link->work, bpf_link_put_deferred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) schedule_work(&link->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) bpf_link_free(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) static int bpf_link_release(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) struct bpf_link *link = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) bpf_link_put(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) #ifdef CONFIG_PROC_FS
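/* Build the link-type name table from <linux/bpf_types.h>: only the
 * BPF_LINK_TYPE() entries expand to array initializers here, while the
 * prog- and map-type macros are stubbed out to nothing.
 */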
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) #define BPF_MAP_TYPE(_id, _ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) #define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) static const char *bpf_link_type_strs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) [BPF_LINK_TYPE_UNSPEC] = "<invalid>",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) #include <linux/bpf_types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) #undef BPF_PROG_TYPE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) #undef BPF_MAP_TYPE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) #undef BPF_LINK_TYPE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) const struct bpf_link *link = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) const struct bpf_prog *prog = link->prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) seq_printf(m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) "link_type:\t%s\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) "link_id:\t%u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) "prog_tag:\t%s\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) "prog_id:\t%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) bpf_link_type_strs[link->type],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) link->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) prog_tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) prog->aux->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) if (link->ops->show_fdinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) link->ops->show_fdinfo(link, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) static const struct file_operations bpf_link_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) .show_fdinfo = bpf_link_show_fdinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) .release = bpf_link_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) .read = bpf_dummy_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) .write = bpf_dummy_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) static int bpf_link_alloc_id(struct bpf_link *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) idr_preload(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) spin_lock_bh(&link_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) spin_unlock_bh(&link_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) idr_preload_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) return id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) * reserving unused FD and allocating ID from link_idr. This is to be paired
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) * with bpf_link_settle() to install FD and ID and expose bpf_link to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) * user-space, if bpf_link is successfully attached. If not, bpf_link and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447)  * pre-allocated resources are to be freed with bpf_link_cleanup() call. All the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)  * transient state is passed around in struct bpf_link_primer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449)  * This is the preferred way to create and initialize bpf_link, especially when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450)  * there are complicated and expensive operations in between creating bpf_link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)  * itself and attaching it to BPF hook. By using bpf_link_prime() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452)  * bpf_link_settle() kernel code using bpf_link doesn't have to perform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)  * expensive (and potentially failing) roll back operations in the rare case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454)  * that file, FD, or ID can't be allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) struct file *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) int fd, id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) fd = get_unused_fd_flags(O_CLOEXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) if (fd < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) return fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) id = bpf_link_alloc_id(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) if (id < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) put_unused_fd(fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) return id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) if (IS_ERR(file)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) bpf_link_free_id(id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) put_unused_fd(fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) return PTR_ERR(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) primer->link = link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) primer->file = file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) primer->fd = fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) primer->id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) int bpf_link_settle(struct bpf_link_primer *primer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) /* make bpf_link fetchable by ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) spin_lock_bh(&link_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) primer->link->id = primer->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) spin_unlock_bh(&link_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) /* make bpf_link fetchable by FD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) fd_install(primer->fd, primer->file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) /* pass through installed FD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) return primer->fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) }
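
/* A minimal usage sketch of the prime/settle pattern described above.
 * bpf_foo_link, bpf_foo_link_lops and foo_hook_attach() are hypothetical,
 * shown only to illustrate the ordering; bpf_raw_tracepoint_open() below
 * is a real user:
 *
 *	struct bpf_link_primer primer;
 *	struct bpf_foo_link *link;
 *	int err;
 *
 *	link = kzalloc(sizeof(*link), GFP_USER);
 *	if (!link)
 *		return -ENOMEM;
 *	bpf_link_init(&link->link, BPF_LINK_TYPE_UNSPEC,
 *		      &bpf_foo_link_lops, prog);
 *
 *	err = bpf_link_prime(&link->link, &primer);
 *	if (err) {
 *		kfree(link);		  // not primed yet, plain kfree is safe
 *		return err;
 *	}
 *	err = foo_hook_attach(link);	  // expensive step that may fail
 *	if (err) {
 *		bpf_link_cleanup(&primer); // releases FD/ID/file, frees link
 *		return err;
 *	}
 *	return bpf_link_settle(&primer);  // install FD and ID, return the FD
 */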
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)
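/* Unlike the prime/settle pair above, this installs an FD for an
 * already-attached link in one step, so there is no way to roll it back
 * if a later operation fails.
 */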
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) int bpf_link_new_fd(struct bpf_link *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) struct bpf_link *bpf_link_get_from_fd(u32 ufd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) struct fd f = fdget(ufd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) struct bpf_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) if (!f.file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) return ERR_PTR(-EBADF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) if (f.file->f_op != &bpf_link_fops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) link = f.file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) bpf_link_inc(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) return link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) struct bpf_tracing_link {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) struct bpf_link link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) enum bpf_attach_type attach_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) struct bpf_trampoline *trampoline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) struct bpf_prog *tgt_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) static void bpf_tracing_link_release(struct bpf_link *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) struct bpf_tracing_link *tr_link =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) container_of(link, struct bpf_tracing_link, link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) tr_link->trampoline));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) bpf_trampoline_put(tr_link->trampoline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) /* tgt_prog is NULL if target is a kernel function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) if (tr_link->tgt_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) bpf_prog_put(tr_link->tgt_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) static void bpf_tracing_link_dealloc(struct bpf_link *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) struct bpf_tracing_link *tr_link =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) container_of(link, struct bpf_tracing_link, link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) kfree(tr_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) struct seq_file *seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) struct bpf_tracing_link *tr_link =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) container_of(link, struct bpf_tracing_link, link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) seq_printf(seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) "attach_type:\t%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) tr_link->attach_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) struct bpf_link_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) struct bpf_tracing_link *tr_link =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) container_of(link, struct bpf_tracing_link, link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) info->tracing.attach_type = tr_link->attach_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) static const struct bpf_link_ops bpf_tracing_link_lops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) .release = bpf_tracing_link_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) .dealloc = bpf_tracing_link_dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) .show_fdinfo = bpf_tracing_link_show_fdinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) .fill_link_info = bpf_tracing_link_fill_link_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) static int bpf_tracing_prog_attach(struct bpf_prog *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) int tgt_prog_fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) u32 btf_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) struct bpf_link_primer link_primer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) struct bpf_prog *tgt_prog = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) struct bpf_trampoline *tr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) struct bpf_tracing_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) u64 key = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) switch (prog->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) case BPF_PROG_TYPE_TRACING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) prog->expected_attach_type != BPF_TRACE_FEXIT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) prog->expected_attach_type != BPF_MODIFY_RETURN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) goto out_put_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) case BPF_PROG_TYPE_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) if (prog->expected_attach_type != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) goto out_put_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) case BPF_PROG_TYPE_LSM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) if (prog->expected_attach_type != BPF_LSM_MAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) goto out_put_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) goto out_put_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) if (!!tgt_prog_fd != !!btf_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) goto out_put_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) if (tgt_prog_fd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) /* For now we only allow new targets for BPF_PROG_TYPE_EXT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) if (prog->type != BPF_PROG_TYPE_EXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) goto out_put_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) tgt_prog = bpf_prog_get(tgt_prog_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) if (IS_ERR(tgt_prog)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) err = PTR_ERR(tgt_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) tgt_prog = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) goto out_put_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) key = bpf_trampoline_compute_key(tgt_prog, btf_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) link = kzalloc(sizeof(*link), GFP_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) if (!link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) goto out_put_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) &bpf_tracing_link_lops, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) link->attach_type = prog->expected_attach_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) mutex_lock(&prog->aux->dst_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) /* There are a few possible cases here:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) * - if prog->aux->dst_trampoline is set, the program was just loaded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) * and not yet attached to anything, so we can use the values stored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) * in prog->aux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) * - if prog->aux->dst_trampoline is NULL, the program has already been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) * attached to a target and its initial target was cleared (below)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) * target_btf_id using the link_create API.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) *
	 * - if tgt_prog == NULL, this function was called using the old
	 *   raw_tracepoint_open API, and we need a target from prog->aux
	 *
	 * The combination of no saved target in prog->aux and no target
	 * specified through the syscall is invalid, and we reject that here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) if (!prog->aux->dst_trampoline && !tgt_prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) if (!prog->aux->dst_trampoline ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) (key && key != prog->aux->dst_trampoline->key)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) /* If there is no saved target, or the specified target is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) * different from the destination specified at load time, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) * need a new trampoline and a check for compatibility
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) struct bpf_attach_target_info tgt_info = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) &tgt_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) tr = bpf_trampoline_get(key, &tgt_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) if (!tr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) /* The caller didn't specify a target, or the target was the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) * same as the destination supplied during program load. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) * means we can reuse the trampoline and reference from program
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) * load time, and there is no need to allocate a new one. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) * can only happen once for any program, as the saved values in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) * prog->aux are cleared below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) tr = prog->aux->dst_trampoline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) tgt_prog = prog->aux->dst_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) err = bpf_link_prime(&link->link, &link_primer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) err = bpf_trampoline_link_prog(prog, tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) bpf_link_cleanup(&link_primer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) link = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) link->tgt_prog = tgt_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) link->trampoline = tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) /* Always clear the trampoline and target prog from prog->aux to make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) * sure the original attach destination is not kept alive after a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) * program is (re-)attached to another target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) if (prog->aux->dst_prog &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) (tgt_prog_fd || tr != prog->aux->dst_trampoline))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) /* got extra prog ref from syscall, or attaching to different prog */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) bpf_prog_put(prog->aux->dst_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) /* we allocated a new trampoline, so free the old one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) bpf_trampoline_put(prog->aux->dst_trampoline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) prog->aux->dst_prog = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) prog->aux->dst_trampoline = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) mutex_unlock(&prog->aux->dst_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) return bpf_link_settle(&link_primer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) if (tr && tr != prog->aux->dst_trampoline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) bpf_trampoline_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) mutex_unlock(&prog->aux->dst_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) kfree(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) out_put_prog:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) if (tgt_prog_fd && tgt_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) bpf_prog_put(tgt_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) }
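
/* From user-space, bpf_tracing_prog_attach() is reached either through
 * BPF_RAW_TRACEPOINT_OPEN (tgt_prog_fd == 0, reusing the target saved at
 * load time) or, for BPF_PROG_TYPE_EXT, through BPF_LINK_CREATE with an
 * explicit target. A rough sketch of the latter, assuming prog_fd is a
 * loaded freplace program and target_fd/btf_id identify the function to
 * replace (error handling elided):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd = prog_fd;
 *	attr.link_create.target_fd = target_fd;	  // prog to attach into
 *	attr.link_create.target_btf_id = btf_id;  // function within target
 *	attr.link_create.attach_type = 0;	  // EXT progs expect 0
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 */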
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) struct bpf_raw_tp_link {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) struct bpf_link link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) struct bpf_raw_event_map *btp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) static void bpf_raw_tp_link_release(struct bpf_link *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) struct bpf_raw_tp_link *raw_tp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) container_of(link, struct bpf_raw_tp_link, link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) bpf_put_raw_tracepoint(raw_tp->btp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) struct bpf_raw_tp_link *raw_tp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) container_of(link, struct bpf_raw_tp_link, link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) kfree(raw_tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) struct seq_file *seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) struct bpf_raw_tp_link *raw_tp_link =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) container_of(link, struct bpf_raw_tp_link, link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) seq_printf(seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) "tp_name:\t%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) raw_tp_link->btp->tp->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) struct bpf_link_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) struct bpf_raw_tp_link *raw_tp_link =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) container_of(link, struct bpf_raw_tp_link, link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) const char *tp_name = raw_tp_link->btp->tp->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) u32 ulen = info->raw_tracepoint.tp_name_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) size_t tp_len = strlen(tp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) if (!ulen ^ !ubuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) info->raw_tracepoint.tp_name_len = tp_len + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) if (!ubuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) if (ulen >= tp_len + 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) if (copy_to_user(ubuf, tp_name, tp_len + 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) char zero = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) if (copy_to_user(ubuf, tp_name, ulen - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) if (put_user(zero, ubuf + ulen - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) }
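
/* A sketch of how user-space is expected to drive the length/truncation
 * protocol above, via BPF_OBJ_GET_INFO_BY_FD on the link FD (get_link_info()
 * is a hypothetical wrapper around that syscall; error handling elided):
 *
 *	struct bpf_link_info info = {};
 *
 *	// 1st call: tp_name == NULL and tp_name_len == 0, so only the
 *	// required length (strlen + 1) is reported back in tp_name_len.
 *	get_link_info(link_fd, &info);
 *	name = malloc(info.raw_tracepoint.tp_name_len);
 *	info.raw_tracepoint.tp_name = (__u64)(unsigned long)name;
 *	// 2nd call: the buffer is large enough, so the full name is copied;
 *	// a too-small buffer would receive a NUL-terminated truncation and
 *	// the call would fail with -ENOSPC.
 *	get_link_info(link_fd, &info);
 */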
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) static const struct bpf_link_ops bpf_raw_tp_link_lops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) .release = bpf_raw_tp_link_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) .dealloc = bpf_raw_tp_link_dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) .show_fdinfo = bpf_raw_tp_link_show_fdinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) .fill_link_info = bpf_raw_tp_link_fill_link_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) struct bpf_link_primer link_primer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) struct bpf_raw_tp_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) struct bpf_raw_event_map *btp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) const char *tp_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) char buf[128];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) if (IS_ERR(prog))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) return PTR_ERR(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) switch (prog->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) case BPF_PROG_TYPE_TRACING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) case BPF_PROG_TYPE_EXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) case BPF_PROG_TYPE_LSM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) if (attr->raw_tracepoint.name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) /* The attach point for this category of programs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) * should be specified via btf_id during program load.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) goto out_put_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) if (prog->type == BPF_PROG_TYPE_TRACING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) prog->expected_attach_type == BPF_TRACE_RAW_TP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) tp_name = prog->aux->attach_func_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) err = bpf_tracing_prog_attach(prog, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) if (err >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) goto out_put_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) case BPF_PROG_TYPE_RAW_TRACEPOINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) if (strncpy_from_user(buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) u64_to_user_ptr(attr->raw_tracepoint.name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) sizeof(buf) - 1) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) goto out_put_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) buf[sizeof(buf) - 1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) tp_name = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) goto out_put_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) btp = bpf_get_raw_tracepoint(tp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) if (!btp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) goto out_put_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) link = kzalloc(sizeof(*link), GFP_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) if (!link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) goto out_put_btp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) &bpf_raw_tp_link_lops, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) link->btp = btp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) err = bpf_link_prime(&link->link, &link_primer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) kfree(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) goto out_put_btp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) err = bpf_probe_register(link->btp, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) bpf_link_cleanup(&link_primer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) goto out_put_btp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) return bpf_link_settle(&link_primer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) out_put_btp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) bpf_put_raw_tracepoint(btp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) out_put_prog:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) bpf_prog_put(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) }
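
/* Minimal user-space sketch for the command handled above, assuming prog_fd
 * is a loaded BPF_PROG_TYPE_RAW_TRACEPOINT program (error handling elided):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.raw_tracepoint.name = (__u64)(unsigned long)"sched_switch";
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *	link_fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN,
 *			  &attr, sizeof(attr));
 *	// link_fd pins the attachment; closing it detaches the program.
 */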
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) enum bpf_attach_type attach_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) switch (prog->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) case BPF_PROG_TYPE_CGROUP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) case BPF_PROG_TYPE_CGROUP_SOCKOPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) case BPF_PROG_TYPE_SK_LOOKUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) case BPF_PROG_TYPE_CGROUP_SKB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) if (!capable(CAP_NET_ADMIN))
			/* cg-skb progs can be loaded by an unprivileged user.
			 * Check permissions at attach time.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) return prog->enforce_expected_attach_type &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) prog->expected_attach_type != attach_type ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) -EINVAL : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) static enum bpf_prog_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) attach_type_to_prog_type(enum bpf_attach_type attach_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) switch (attach_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) case BPF_CGROUP_INET_INGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) case BPF_CGROUP_INET_EGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) return BPF_PROG_TYPE_CGROUP_SKB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) case BPF_CGROUP_INET_SOCK_CREATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) case BPF_CGROUP_INET_SOCK_RELEASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) case BPF_CGROUP_INET4_POST_BIND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) case BPF_CGROUP_INET6_POST_BIND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) return BPF_PROG_TYPE_CGROUP_SOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) case BPF_CGROUP_INET4_BIND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) case BPF_CGROUP_INET6_BIND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) case BPF_CGROUP_INET4_CONNECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) case BPF_CGROUP_INET6_CONNECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) case BPF_CGROUP_INET4_GETPEERNAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) case BPF_CGROUP_INET6_GETPEERNAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) case BPF_CGROUP_INET4_GETSOCKNAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) case BPF_CGROUP_INET6_GETSOCKNAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) case BPF_CGROUP_UDP4_SENDMSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) case BPF_CGROUP_UDP6_SENDMSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) case BPF_CGROUP_UDP4_RECVMSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) case BPF_CGROUP_UDP6_RECVMSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) case BPF_CGROUP_SOCK_OPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) return BPF_PROG_TYPE_SOCK_OPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) case BPF_CGROUP_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) return BPF_PROG_TYPE_CGROUP_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) case BPF_SK_MSG_VERDICT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) return BPF_PROG_TYPE_SK_MSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) case BPF_SK_SKB_STREAM_PARSER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) case BPF_SK_SKB_STREAM_VERDICT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) return BPF_PROG_TYPE_SK_SKB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) case BPF_LIRC_MODE2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) return BPF_PROG_TYPE_LIRC_MODE2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) case BPF_FLOW_DISSECTOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) return BPF_PROG_TYPE_FLOW_DISSECTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) case BPF_CGROUP_SYSCTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) return BPF_PROG_TYPE_CGROUP_SYSCTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) case BPF_CGROUP_GETSOCKOPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) case BPF_CGROUP_SETSOCKOPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) return BPF_PROG_TYPE_CGROUP_SOCKOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) case BPF_TRACE_ITER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) return BPF_PROG_TYPE_TRACING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) case BPF_SK_LOOKUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) return BPF_PROG_TYPE_SK_LOOKUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) case BPF_XDP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) return BPF_PROG_TYPE_XDP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) return BPF_PROG_TYPE_UNSPEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) #define BPF_F_ATTACH_MASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) static int bpf_prog_attach(const union bpf_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) enum bpf_prog_type ptype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) if (CHECK_ATTR(BPF_PROG_ATTACH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) ptype = attach_type_to_prog_type(attr->attach_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) if (ptype == BPF_PROG_TYPE_UNSPEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) if (IS_ERR(prog))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) return PTR_ERR(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) bpf_prog_put(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) switch (ptype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) case BPF_PROG_TYPE_SK_SKB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) case BPF_PROG_TYPE_SK_MSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) ret = sock_map_get_from_fd(attr, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) case BPF_PROG_TYPE_LIRC_MODE2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) ret = lirc_prog_attach(attr, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) case BPF_PROG_TYPE_FLOW_DISSECTOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) ret = netns_bpf_prog_attach(attr, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) case BPF_PROG_TYPE_CGROUP_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) case BPF_PROG_TYPE_CGROUP_SKB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) case BPF_PROG_TYPE_CGROUP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) case BPF_PROG_TYPE_CGROUP_SOCKOPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) case BPF_PROG_TYPE_CGROUP_SYSCTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) case BPF_PROG_TYPE_SOCK_OPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) ret = cgroup_bpf_prog_attach(attr, ptype, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) bpf_prog_put(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) }
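
/* User-space sketch of BPF_PROG_ATTACH for the cgroup case above, assuming
 * cgroup_fd is an open cgroup-v2 directory and prog_fd a loaded
 * BPF_PROG_TYPE_CGROUP_SKB program (error handling elided):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags = BPF_F_ALLOW_MULTI;
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */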
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) #define BPF_PROG_DETACH_LAST_FIELD attach_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) static int bpf_prog_detach(const union bpf_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) enum bpf_prog_type ptype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) if (CHECK_ATTR(BPF_PROG_DETACH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) ptype = attach_type_to_prog_type(attr->attach_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) switch (ptype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) case BPF_PROG_TYPE_SK_MSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) case BPF_PROG_TYPE_SK_SKB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) return sock_map_prog_detach(attr, ptype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) case BPF_PROG_TYPE_LIRC_MODE2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) return lirc_prog_detach(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) case BPF_PROG_TYPE_FLOW_DISSECTOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) return netns_bpf_prog_detach(attr, ptype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) case BPF_PROG_TYPE_CGROUP_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) case BPF_PROG_TYPE_CGROUP_SKB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) case BPF_PROG_TYPE_CGROUP_SOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) case BPF_PROG_TYPE_CGROUP_SOCKOPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) case BPF_PROG_TYPE_CGROUP_SYSCTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) case BPF_PROG_TYPE_SOCK_OPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) return cgroup_bpf_prog_detach(attr, ptype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) static int bpf_prog_query(const union bpf_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) union bpf_attr __user *uattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) if (!capable(CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) if (CHECK_ATTR(BPF_PROG_QUERY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) switch (attr->query.attach_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) case BPF_CGROUP_INET_INGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) case BPF_CGROUP_INET_EGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) case BPF_CGROUP_INET_SOCK_CREATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) case BPF_CGROUP_INET_SOCK_RELEASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) case BPF_CGROUP_INET4_BIND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) case BPF_CGROUP_INET6_BIND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) case BPF_CGROUP_INET4_POST_BIND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) case BPF_CGROUP_INET6_POST_BIND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) case BPF_CGROUP_INET4_CONNECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) case BPF_CGROUP_INET6_CONNECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) case BPF_CGROUP_INET4_GETPEERNAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) case BPF_CGROUP_INET6_GETPEERNAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) case BPF_CGROUP_INET4_GETSOCKNAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) case BPF_CGROUP_INET6_GETSOCKNAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) case BPF_CGROUP_UDP4_SENDMSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) case BPF_CGROUP_UDP6_SENDMSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) case BPF_CGROUP_UDP4_RECVMSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) case BPF_CGROUP_UDP6_RECVMSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) case BPF_CGROUP_SOCK_OPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) case BPF_CGROUP_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) case BPF_CGROUP_SYSCTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) case BPF_CGROUP_GETSOCKOPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) case BPF_CGROUP_SETSOCKOPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) return cgroup_bpf_prog_query(attr, uattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) case BPF_LIRC_MODE2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) return lirc_prog_query(attr, uattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) case BPF_FLOW_DISSECTOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) case BPF_SK_LOOKUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) return netns_bpf_prog_query(attr, uattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) #define BPF_PROG_TEST_RUN_LAST_FIELD test.cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) static int bpf_prog_test_run(const union bpf_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) union bpf_attr __user *uattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) int ret = -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) if (CHECK_ATTR(BPF_PROG_TEST_RUN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) (!attr->test.ctx_size_in && attr->test.ctx_in))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) (!attr->test.ctx_size_out && attr->test.ctx_out))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) prog = bpf_prog_get(attr->test.prog_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) if (IS_ERR(prog))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) return PTR_ERR(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) if (prog->aux->ops->test_run)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) ret = prog->aux->ops->test_run(prog, attr, uattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) bpf_prog_put(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) }
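
/* User-space sketch of BPF_PROG_TEST_RUN for a program type whose ops
 * implement ->test_run, e.g. feeding one packet to a networking program
 * (error handling elided):
 *
 *	union bpf_attr attr = {};
 *	char pkt[64] = {0};	// test input packet
 *
 *	attr.test.prog_fd = prog_fd;
 *	attr.test.data_in = (__u64)(unsigned long)pkt;
 *	attr.test.data_size_in = sizeof(pkt);
 *	attr.test.repeat = 1;
 *	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *	// attr.test.retval holds the program's return value and
 *	// attr.test.duration the average run time in nanoseconds.
 */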
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) static int bpf_obj_get_next_id(const union bpf_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) union bpf_attr __user *uattr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) struct idr *idr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) spinlock_t *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) u32 next_id = attr->start_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) next_id++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) spin_lock_bh(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) if (!idr_get_next(idr, &next_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) spin_unlock_bh(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) err = put_user(next_id, &uattr->next_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) }
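
/* The canonical user-space use of this helper is ID iteration, e.g. walking
 * all loaded programs the way bpftool does (requires CAP_SYS_ADMIN; error
 * handling elided):
 *
 *	union bpf_attr attr = {};
 *	__u32 id = 0;
 *
 *	for (;;) {
 *		attr.start_id = id;
 *		if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID,
 *			    &attr, sizeof(attr)))
 *			break;			// -ENOENT: no more IDs
 *		id = attr.next_id;
 *		// ... fetch an FD: BPF_PROG_GET_FD_BY_ID with a fresh
 *		// attr and attr.prog_id = id ...
 *	}
 */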
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) struct bpf_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) spin_lock_bh(&map_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) map = idr_get_next(&map_idr, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) if (map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) map = __bpf_map_inc_not_zero(map, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) if (IS_ERR(map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) (*id)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) spin_unlock_bh(&map_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) return map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) spin_lock_bh(&prog_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) prog = idr_get_next(&prog_idr, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) if (prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) prog = bpf_prog_inc_not_zero(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) if (IS_ERR(prog)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) (*id)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) spin_unlock_bh(&prog_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) return prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) struct bpf_prog *bpf_prog_by_id(u32 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) if (!id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) spin_lock_bh(&prog_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) prog = idr_find(&prog_idr, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) if (prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) prog = bpf_prog_inc_not_zero(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) prog = ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) spin_unlock_bh(&prog_idr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) return prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) u32 id = attr->prog_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) int fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) prog = bpf_prog_by_id(id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) if (IS_ERR(prog))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) return PTR_ERR(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) fd = bpf_prog_new_fd(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) if (fd < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) bpf_prog_put(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) return fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265)
static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_map *map;
	u32 id = attr->map_id;
	int f_flags;
	int fd;

	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	f_flags = bpf_get_file_flag(attr->open_flags);
	if (f_flags < 0)
		return f_flags;

	spin_lock_bh(&map_idr_lock);
	map = idr_find(&map_idr, id);
	if (map)
		map = __bpf_map_inc_not_zero(map, true);
	else
		map = ERR_PTR(-ENOENT);
	spin_unlock_bh(&map_idr_lock);

	if (IS_ERR(map))
		return PTR_ERR(map);

	fd = bpf_map_new_fd(map, f_flags);
	if (fd < 0)
		bpf_map_put_with_uref(map);

	return fd;
}

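/* Map an address found in a BPF_LD_IMM64 instruction back to one of the
 * program's used maps.  A direct hit on the map pointer reports
 * BPF_PSEUDO_MAP_FD; an address inside a map's direct value area reports
 * BPF_PSEUDO_MAP_VALUE plus the offset within the value.  Returns NULL when
 * the address belongs to no used map.  The returned pointer is borrowed: it
 * stays valid only as long as the program holds its map references.
 */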
static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
					      unsigned long addr, u32 *off,
					      u32 *type)
{
	const struct bpf_map *map;
	int i;

	mutex_lock(&prog->aux->used_maps_mutex);
	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
		map = prog->aux->used_maps[i];
		if (map == (void *)addr) {
			*type = BPF_PSEUDO_MAP_FD;
			goto out;
		}
		if (!map->ops->map_direct_value_meta)
			continue;
		if (!map->ops->map_direct_value_meta(map, addr, off)) {
			*type = BPF_PSEUDO_MAP_VALUE;
			goto out;
		}
	}
	map = NULL;

out:
	mutex_unlock(&prog->aux->used_maps_mutex);
	return map;
}

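/* Build a sanitized copy of the instruction stream for dumping to user
 * space: rewrite tail calls and BPF_CALL_ARGS back to plain helper calls
 * (zeroing the call imm for creds that fail bpf_dump_raw_ok()), undo the
 * BPF_PROBE_MEM conversion, and replace map pointers embedded in
 * BPF_LD_IMM64 with map id + offset.  Returns NULL on allocation failure.
 */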
static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
					      const struct cred *f_cred)
{
	const struct bpf_map *map;
	struct bpf_insn *insns;
	u32 off, type;
	u64 imm;
	u8 code;
	int i;

	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
			GFP_USER);
	if (!insns)
		return insns;

	for (i = 0; i < prog->len; i++) {
		code = insns[i].code;

		if (code == (BPF_JMP | BPF_TAIL_CALL)) {
			insns[i].code = BPF_JMP | BPF_CALL;
			insns[i].imm = BPF_FUNC_tail_call;
			/* fall-through */
		}
		if (code == (BPF_JMP | BPF_CALL) ||
		    code == (BPF_JMP | BPF_CALL_ARGS)) {
			if (code == (BPF_JMP | BPF_CALL_ARGS))
				insns[i].code = BPF_JMP | BPF_CALL;
			if (!bpf_dump_raw_ok(f_cred))
				insns[i].imm = 0;
			continue;
		}
		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
			continue;
		}

		if (code != (BPF_LD | BPF_IMM | BPF_DW))
			continue;

		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
		map = bpf_map_from_imm(prog, imm, &off, &type);
		if (map) {
			insns[i].src_reg = type;
			insns[i].imm = map->id;
			insns[i + 1].imm = off;
			continue;
		}
	}

	return insns;
}

static int set_info_rec_size(struct bpf_prog_info *info)
{
	/*
	 * Ensure each info.*_rec_size matches the record size the kernel
	 * expects.  A zero *_rec_size is only allowed when the matching
	 * *_cnt is zero as well; in that case the kernel writes the
	 * expected _rec_size back into the info.
	 */

	if ((info->nr_func_info || info->func_info_rec_size) &&
	    info->func_info_rec_size != sizeof(struct bpf_func_info))
		return -EINVAL;

	if ((info->nr_line_info || info->line_info_rec_size) &&
	    info->line_info_rec_size != sizeof(struct bpf_line_info))
		return -EINVAL;

	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
	    info->jited_line_info_rec_size != sizeof(__u64))
		return -EINVAL;

	info->func_info_rec_size = sizeof(struct bpf_func_info);
	info->line_info_rec_size = sizeof(struct bpf_line_info);
	info->jited_line_info_rec_size = sizeof(__u64);

	return 0;
}

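/* Fill a bpf_prog_info for the BPF_OBJ_GET_INFO_BY_FD command.  Only
 * min(sizeof(info), attr->info.info_len) bytes are exchanged with user
 * space, and bpf_check_uarg_tail_zero() must accept the tail first, so both
 * older and newer userspace structs work.  Callers without bpf_capable()
 * get all instruction, ksym and BTF-record counts zeroed; basic metadata
 * (id, tag, name, map ids, stats) is still reported.
 */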
static int bpf_prog_get_info_by_fd(struct file *file,
				   struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_prog_info info;
	u32 info_len = attr->info.info_len;
	struct bpf_prog_stats stats;
	char __user *uinsns;
	u32 ulen;
	int err;

	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;
	info.load_time = prog->aux->load_time;
	info.created_by_uid = from_kuid_munged(current_user_ns(),
					       prog->aux->user->uid);
	info.gpl_compatible = prog->gpl_compatible;

	memcpy(info.tag, prog->tag, sizeof(prog->tag));
	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));

	mutex_lock(&prog->aux->used_maps_mutex);
	ulen = info.nr_map_ids;
	info.nr_map_ids = prog->aux->used_map_cnt;
	ulen = min_t(u32, info.nr_map_ids, ulen);
	if (ulen) {
		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
		u32 i;

		for (i = 0; i < ulen; i++)
			if (put_user(prog->aux->used_maps[i]->id,
				     &user_map_ids[i])) {
				mutex_unlock(&prog->aux->used_maps_mutex);
				return -EFAULT;
			}
	}
	mutex_unlock(&prog->aux->used_maps_mutex);

	err = set_info_rec_size(&info);
	if (err)
		return err;

	bpf_prog_get_stats(prog, &stats);
	info.run_time_ns = stats.nsecs;
	info.run_cnt = stats.cnt;

	if (!bpf_capable()) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
		info.nr_jited_ksyms = 0;
		info.nr_jited_func_lens = 0;
		info.nr_func_info = 0;
		info.nr_line_info = 0;
		info.nr_jited_line_info = 0;
		goto done;
	}

	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_insn_size(prog);
	if (info.xlated_prog_len && ulen) {
		struct bpf_insn *insns_sanitized;
		bool fault;

		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
			info.xlated_prog_insns = 0;
			goto done;
		}
		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
		if (!insns_sanitized)
			return -ENOMEM;
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		fault = copy_to_user(uinsns, insns_sanitized, ulen);
		kfree(insns_sanitized);
		if (fault)
			return -EFAULT;
	}

	if (bpf_prog_is_dev_bound(prog->aux)) {
		err = bpf_prog_offload_info_fill(&info, prog);
		if (err)
			return err;
		goto done;
	}

	/* NOTE: the following code is not reached for dev-bound programs;
	 * bpf_prog_offload_info_fill() above is the place where the
	 * corresponding fields are filled in for offload.
	 */
	ulen = info.jited_prog_len;
	if (prog->aux->func_cnt) {
		u32 i;

		info.jited_prog_len = 0;
		for (i = 0; i < prog->aux->func_cnt; i++)
			info.jited_prog_len += prog->aux->func[i]->jited_len;
	} else {
		info.jited_prog_len = prog->jited_len;
	}

	if (info.jited_prog_len && ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			uinsns = u64_to_user_ptr(info.jited_prog_insns);
			ulen = min_t(u32, info.jited_prog_len, ulen);

			/* for multi-function programs, copy the JITed
			 * instructions for all the functions
			 */
			if (prog->aux->func_cnt) {
				u32 len, free, i;
				u8 *img;

				free = ulen;
				for (i = 0; i < prog->aux->func_cnt; i++) {
					len = prog->aux->func[i]->jited_len;
					len = min_t(u32, len, free);
					img = (u8 *) prog->aux->func[i]->bpf_func;
					if (copy_to_user(uinsns, img, len))
						return -EFAULT;
					uinsns += len;
					free -= len;
					if (!free)
						break;
				}
			} else {
				if (copy_to_user(uinsns, prog->bpf_func, ulen))
					return -EFAULT;
			}
		} else {
			info.jited_prog_insns = 0;
		}
	}

	ulen = info.nr_jited_ksyms;
	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
	if (ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			unsigned long ksym_addr;
			u64 __user *user_ksyms;
			u32 i;

			/* copy the address of the kernel symbol
			 * corresponding to each function
			 */
			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
			if (prog->aux->func_cnt) {
				for (i = 0; i < ulen; i++) {
					ksym_addr = (unsigned long)
						prog->aux->func[i]->bpf_func;
					if (put_user((u64) ksym_addr,
						     &user_ksyms[i]))
						return -EFAULT;
				}
			} else {
				ksym_addr = (unsigned long) prog->bpf_func;
				if (put_user((u64) ksym_addr, &user_ksyms[0]))
					return -EFAULT;
			}
		} else {
			info.jited_ksyms = 0;
		}
	}

	ulen = info.nr_jited_func_lens;
	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
	if (ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			u32 __user *user_lens;
			u32 func_len, i;

			/* copy the JITed image lengths for each function */
			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
			user_lens = u64_to_user_ptr(info.jited_func_lens);
			if (prog->aux->func_cnt) {
				for (i = 0; i < ulen; i++) {
					func_len =
						prog->aux->func[i]->jited_len;
					if (put_user(func_len, &user_lens[i]))
						return -EFAULT;
				}
			} else {
				func_len = prog->jited_len;
				if (put_user(func_len, &user_lens[0]))
					return -EFAULT;
			}
		} else {
			info.jited_func_lens = 0;
		}
	}

	if (prog->aux->btf)
		info.btf_id = btf_id(prog->aux->btf);

	ulen = info.nr_func_info;
	info.nr_func_info = prog->aux->func_info_cnt;
	if (info.nr_func_info && ulen) {
		char __user *user_finfo;

		user_finfo = u64_to_user_ptr(info.func_info);
		ulen = min_t(u32, info.nr_func_info, ulen);
		if (copy_to_user(user_finfo, prog->aux->func_info,
				 info.func_info_rec_size * ulen))
			return -EFAULT;
	}

	ulen = info.nr_line_info;
	info.nr_line_info = prog->aux->nr_linfo;
	if (info.nr_line_info && ulen) {
		__u8 __user *user_linfo;

		user_linfo = u64_to_user_ptr(info.line_info);
		ulen = min_t(u32, info.nr_line_info, ulen);
		if (copy_to_user(user_linfo, prog->aux->linfo,
				 info.line_info_rec_size * ulen))
			return -EFAULT;
	}

	ulen = info.nr_jited_line_info;
	if (prog->aux->jited_linfo)
		info.nr_jited_line_info = prog->aux->nr_linfo;
	else
		info.nr_jited_line_info = 0;
	if (info.nr_jited_line_info && ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			__u64 __user *user_linfo;
			u32 i;

			user_linfo = u64_to_user_ptr(info.jited_line_info);
			ulen = min_t(u32, info.nr_jited_line_info, ulen);
			for (i = 0; i < ulen; i++) {
				if (put_user((__u64)(long)prog->aux->jited_linfo[i],
					     &user_linfo[i]))
					return -EFAULT;
			}
		} else {
			info.jited_line_info = 0;
		}
	}

	ulen = info.nr_prog_tags;
	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
	if (ulen) {
		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
		u32 i;

		user_prog_tags = u64_to_user_ptr(info.prog_tags);
		ulen = min_t(u32, info.nr_prog_tags, ulen);
		if (prog->aux->func_cnt) {
			for (i = 0; i < ulen; i++) {
				if (copy_to_user(user_prog_tags[i],
						 prog->aux->func[i]->tag,
						 BPF_TAG_SIZE))
					return -EFAULT;
			}
		} else {
			if (copy_to_user(user_prog_tags[0],
					 prog->tag, BPF_TAG_SIZE))
				return -EFAULT;
		}
	}

done:
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

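/* Fill a bpf_map_info for BPF_OBJ_GET_INFO_BY_FD.  The same tail-zero and
 * info_len contract as for programs applies; dev-bound maps additionally
 * get their offload details filled in by bpf_map_offload_info_fill().
 */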
static int bpf_map_get_info_by_fd(struct file *file,
				  struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info;
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;
	memcpy(info.name, map->name, sizeof(map->name));

	if (map->btf) {
		info.btf_id = btf_id(map->btf);
		info.btf_key_type_id = map->btf_key_type_id;
		info.btf_value_type_id = map->btf_value_type_id;
	}
	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_info_fill(&info, map);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

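/* BTF objects delegate to btf_get_info_by_fd() after the usual tail-zero
 * check on the user-supplied struct size.
 */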
static int bpf_btf_get_info_by_fd(struct file *file,
				  struct btf *btf,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
	if (err)
		return err;

	return btf_get_info_by_fd(btf, attr, uattr);
}

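/* Fill a bpf_link_info: the common fields (type, id, prog id) are set here,
 * link-type specific ones are delegated to the link's fill_link_info
 * callback when one is provided.
 */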
static int bpf_link_get_info_by_fd(struct file *file,
				   struct bpf_link *link,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_link_info info;
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = link->type;
	info.id = link->id;
	info.prog_id = link->prog->aux->id;

	if (link->ops->fill_link_info) {
		err = link->ops->fill_link_info(link, &info);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

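/* Entry point for the BPF_OBJ_GET_INFO_BY_FD command: dispatch on the
 * file's f_op to the prog, map, BTF or link handler.  An fd of the wrong
 * kind yields -EINVAL, while a bad fd yields -EBADFD.
 */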
static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
					     uattr);
	else if (f.file->f_op == &btf_fops)
		err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
	else if (f.file->f_op == &bpf_link_fops)
		err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
					      attr, uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}

#define BPF_BTF_LOAD_LAST_FIELD btf_log_level

static int bpf_btf_load(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_BTF_LOAD))
		return -EINVAL;

	if (!bpf_capable())
		return -EPERM;

	return btf_new_fd(attr);
}

#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id

static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btf_get_fd_by_id(attr->btf_id);
}

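/* Copy the result of a task_fd_query back to user space.  The required
 * buffer length (strlen of the name) is always reported via buf_len; if the
 * supplied buffer is too small the name is truncated, still NUL-terminated,
 * and -ENOSPC is returned after the fixed-size fields have been filled in.
 */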
static int bpf_task_fd_query_copy(const union bpf_attr *attr,
				  union bpf_attr __user *uattr,
				  u32 prog_id, u32 fd_type,
				  const char *buf, u64 probe_offset,
				  u64 probe_addr)
{
	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
	u32 len = buf ? strlen(buf) : 0, input_len;
	int err = 0;

	if (put_user(len, &uattr->task_fd_query.buf_len))
		return -EFAULT;
	input_len = attr->task_fd_query.buf_len;
	if (input_len && ubuf) {
		if (!len) {
			/* nothing to copy, just make ubuf NULL terminated */
			char zero = '\0';

			if (put_user(zero, ubuf))
				return -EFAULT;
		} else if (input_len >= len + 1) {
			/* ubuf can hold the string with NULL terminator */
			if (copy_to_user(ubuf, buf, len + 1))
				return -EFAULT;
		} else {
			/* ubuf cannot hold the string with NULL terminator,
			 * do a partial copy with NULL terminator.
			 */
			char zero = '\0';

			err = -ENOSPC;
			if (copy_to_user(ubuf, buf, input_len - 1))
				return -EFAULT;
			if (put_user(zero, ubuf + input_len - 1))
				return -EFAULT;
		}
	}

	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
		return -EFAULT;

	return err;
}

#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr

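/* Handler for BPF_TASK_FD_QUERY: given a pid and an fd in that task's file
 * table, report which BPF program the fd pins, either via a raw tracepoint
 * link or via a perf event, as prog id, fd type, name and probe
 * offset/address.  Any other file type yields -ENOTSUPP.
 */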
static int bpf_task_fd_query(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	pid_t pid = attr->task_fd_query.pid;
	u32 fd = attr->task_fd_query.fd;
	const struct perf_event *event;
	struct files_struct *files;
	struct task_struct *task;
	struct file *file;
	int err;

	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (attr->task_fd_query.flags != 0)
		return -EINVAL;

	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
	if (!task)
		return -ENOENT;

	files = get_files_struct(task);
	put_task_struct(task);
	if (!files)
		return -ENOENT;

	err = 0;
	spin_lock(&files->file_lock);
	file = fcheck_files(files, fd);
	if (!file)
		err = -EBADF;
	else
		get_file(file);
	spin_unlock(&files->file_lock);
	put_files_struct(files);

	if (err)
		goto out;

	if (file->f_op == &bpf_link_fops) {
		struct bpf_link *link = file->private_data;

		if (link->ops == &bpf_raw_tp_link_lops) {
			struct bpf_raw_tp_link *raw_tp =
				container_of(link, struct bpf_raw_tp_link, link);
			struct bpf_raw_event_map *btp = raw_tp->btp;

			err = bpf_task_fd_query_copy(attr, uattr,
						     raw_tp->link.prog->aux->id,
						     BPF_FD_TYPE_RAW_TRACEPOINT,
						     btp->tp->name, 0, 0);
			goto put_file;
		}
		goto out_not_supp;
	}

	event = perf_get_event(file);
	if (!IS_ERR(event)) {
		u64 probe_offset, probe_addr;
		u32 prog_id, fd_type;
		const char *buf;

		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
					      &buf, &probe_offset,
					      &probe_addr);
		if (!err)
			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
						     fd_type, buf,
						     probe_offset,
						     probe_addr);
		goto put_file;
	}

out_not_supp:
	err = -ENOTSUPP;
put_file:
	fput(file);
out:
	return err;
}

#define BPF_MAP_BATCH_LAST_FIELD batch.flags

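/* Common glue for the four map batch commands.  BPF_DO_BATCH() rejects maps
 * whose ops lack the requested batch callback with -ENOTSUPP;
 * bpf_map_do_batch() additionally enforces FMODE_CAN_READ/FMODE_CAN_WRITE
 * according to whether the command reads and/or mutates the map, and keeps
 * the map's write-active count raised around mutating commands.
 */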
#define BPF_DO_BATCH(fn)			\
	do {					\
		if (!fn) {			\
			err = -ENOTSUPP;	\
			goto err_put;		\
		}				\
		err = fn(map, attr, uattr);	\
	} while (0)

static int bpf_map_do_batch(const union bpf_attr *attr,
			    union bpf_attr __user *uattr,
			    int cmd)
{
	bool has_read = cmd == BPF_MAP_LOOKUP_BATCH ||
			cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
	bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
	struct bpf_map *map;
	int err, ufd;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_BATCH))
		return -EINVAL;

	ufd = attr->batch.map_fd;
	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (has_write)
		bpf_map_write_active_inc(map);
	if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}
	if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if (cmd == BPF_MAP_LOOKUP_BATCH)
		BPF_DO_BATCH(map->ops->map_lookup_batch);
	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
	else if (cmd == BPF_MAP_UPDATE_BATCH)
		BPF_DO_BATCH(map->ops->map_update_batch);
	else
		BPF_DO_BATCH(map->ops->map_delete_batch);
err_put:
	if (has_write)
		bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

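/* Create a link for tracing-flavoured programs.  The requested attach type
 * must match what the program was loaded with; iterator programs become
 * bpf_iter links, and BPF_PROG_TYPE_EXT programs reuse the tracing attach
 * path with an explicit target fd / BTF id.
 */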
static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	if (attr->link_create.attach_type != prog->expected_attach_type)
		return -EINVAL;

	if (prog->expected_attach_type == BPF_TRACE_ITER)
		return bpf_iter_link_attach(attr, prog);
	else if (prog->type == BPF_PROG_TYPE_EXT)
		return bpf_tracing_prog_attach(prog,
					       attr->link_create.target_fd,
					       attr->link_create.target_btf_id);
	return -EINVAL;
}

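/* Handler for BPF_LINK_CREATE: resolve the program fd, validate the attach
 * type (BPF_PROG_TYPE_EXT is special-cased before the generic mapping,
 * since its attach type only has to match the load-time choice), then
 * dispatch to the cgroup, tracing, netns or XDP link constructor.  The prog
 * reference is dropped again on any failure.
 */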
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) #define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) static int link_create(union bpf_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) enum bpf_prog_type ptype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) if (CHECK_ATTR(BPF_LINK_CREATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) prog = bpf_prog_get(attr->link_create.prog_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) if (IS_ERR(prog))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) return PTR_ERR(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) ret = bpf_prog_attach_check_attach_type(prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) attr->link_create.attach_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072)
	if (prog->type == BPF_PROG_TYPE_EXT) {
		ret = tracing_bpf_link_attach(attr, prog);
		goto out;
	}

	ptype = attach_type_to_prog_type(attr->link_create.attach_type);
	if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
		ret = -EINVAL;
		goto out;
	}

	switch (ptype) {
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		ret = cgroup_bpf_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_TRACING:
		ret = tracing_bpf_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_SK_LOOKUP:
		ret = netns_bpf_link_create(attr, prog);
		break;
#ifdef CONFIG_NET
	case BPF_PROG_TYPE_XDP:
		ret = bpf_xdp_link_attach(attr, prog);
		break;
#endif
	default:
		ret = -EINVAL;
	}

out:
	if (ret < 0)
		bpf_prog_put(prog);
	return ret;
}

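/*
 * BPF_LINK_UPDATE: atomically swap the BPF program backing an existing
 * link.  With BPF_F_REPLACE the swap only succeeds if the link still
 * runs old_prog_fd (compare-and-exchange semantics); without the flag,
 * old_prog_fd must be zero.  Only link types implementing
 * ->update_prog (e.g. cgroup and XDP links) support this command.
 *
 * Illustrative (untested) userspace sketch using the raw syscall:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_update.link_fd = link_fd;
 *	attr.link_update.new_prog_fd = new_prog_fd;
 *	attr.link_update.old_prog_fd = old_prog_fd;
 *	attr.link_update.flags = BPF_F_REPLACE;
 *	err = syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
 */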
#define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd

static int link_update(union bpf_attr *attr)
{
	struct bpf_prog *old_prog = NULL, *new_prog;
	struct bpf_link *link;
	u32 flags;
	int ret;

	if (CHECK_ATTR(BPF_LINK_UPDATE))
		return -EINVAL;

	flags = attr->link_update.flags;
	if (flags & ~BPF_F_REPLACE)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->link_update.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
	if (IS_ERR(new_prog)) {
		ret = PTR_ERR(new_prog);
		goto out_put_link;
	}

	if (flags & BPF_F_REPLACE) {
		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
		if (IS_ERR(old_prog)) {
			ret = PTR_ERR(old_prog);
			old_prog = NULL;
			goto out_put_progs;
		}
	} else if (attr->link_update.old_prog_fd) {
		ret = -EINVAL;
		goto out_put_progs;
	}

	if (link->ops->update_prog)
		ret = link->ops->update_prog(link, new_prog, old_prog);
	else
		ret = -EINVAL;

out_put_progs:
	if (old_prog)
		bpf_prog_put(old_prog);
	if (ret)
		bpf_prog_put(new_prog);
out_put_link:
	bpf_link_put(link);
	return ret;
}

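/*
 * BPF_LINK_DETACH: forcefully detach a link from its hook.  The link
 * fd itself stays valid until released, but the link is no longer
 * attached to anything.  Only link types implementing ->detach
 * support this; others return -EOPNOTSUPP.
 */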
#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd

static int link_detach(union bpf_attr *attr)
{
	struct bpf_link *link;
	int ret;

	if (CHECK_ATTR(BPF_LINK_DETACH))
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	if (link->ops->detach)
		ret = link->ops->detach(link);
	else
		ret = -EOPNOTSUPP;

	bpf_link_put(link);
	return ret;
}

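/* Take a new reference only if the link is still alive (refcnt > 0).
 * Called under link_idr_lock, so a dying link cannot be freed while
 * its refcount is being inspected here.
 */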
static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
{
	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
}

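/* Look up an active link by ID and take a reference.  Returns -EAGAIN
 * for a link that is in the IDR but not yet fully settled (link->id
 * still 0), -ENOENT if the ID is unknown or the link is dying.
 */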
struct bpf_link *bpf_link_by_id(u32 id)
{
	struct bpf_link *link;

	if (!id)
		return ERR_PTR(-ENOENT);

	spin_lock_bh(&link_idr_lock);
	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
	link = idr_find(&link_idr, id);
	if (link) {
		if (link->id)
			link = bpf_link_inc_not_zero(link);
		else
			link = ERR_PTR(-EAGAIN);
	} else {
		link = ERR_PTR(-ENOENT);
	}
	spin_unlock_bh(&link_idr_lock);
	return link;
}

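/*
 * BPF_LINK_GET_FD_BY_ID: open a new fd referring to an existing link.
 * CAP_SYS_ADMIN is required since this grants access to links created
 * by other processes.  Combined with BPF_LINK_GET_NEXT_ID it allows
 * walking every link in the system.  Illustrative (untested) sketch;
 * note that start_id/link_id share an anonymous union in bpf_attr and
 * next_id must be cleared again before GET_FD_BY_ID, since CHECK_ATTR
 * rejects non-zero bytes past the command's last field:
 *
 *	union bpf_attr attr;
 *	__u32 id = 0;
 *	int fd;
 *
 *	for (;;) {
 *		memset(&attr, 0, sizeof(attr));
 *		attr.start_id = id;
 *		if (syscall(__NR_bpf, BPF_LINK_GET_NEXT_ID, &attr,
 *			    sizeof(attr)))
 *			break;
 *		id = attr.next_id;
 *		memset(&attr, 0, sizeof(attr));
 *		attr.link_id = id;
 *		fd = syscall(__NR_bpf, BPF_LINK_GET_FD_BY_ID, &attr,
 *			     sizeof(attr));
 *		if (fd < 0)
 *			continue;
 *		... inspect via BPF_OBJ_GET_INFO_BY_FD ...
 *		close(fd);
 *	}
 */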
#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id

static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_link *link;
	u32 id = attr->link_id;
	int fd;

	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	link = bpf_link_by_id(id);
	if (IS_ERR(link))
		return PTR_ERR(link);

	fd = bpf_link_new_fd(link);
	if (fd < 0)
		bpf_link_put(link);

	return fd;
}

DEFINE_MUTEX(bpf_stats_enabled_mutex);

static int bpf_stats_release(struct inode *inode, struct file *file)
{
	mutex_lock(&bpf_stats_enabled_mutex);
	static_key_slow_dec(&bpf_stats_enabled_key.key);
	mutex_unlock(&bpf_stats_enabled_mutex);
	return 0;
}

static const struct file_operations bpf_stats_fops = {
	.release = bpf_stats_release,
};

static int bpf_enable_runtime_stats(void)
{
	int fd;

	mutex_lock(&bpf_stats_enabled_mutex);

	/* Refuse well before the static key's enable count can overflow */
	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
		mutex_unlock(&bpf_stats_enabled_mutex);
		return -EBUSY;
	}

	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
	if (fd >= 0)
		static_key_slow_inc(&bpf_stats_enabled_key.key);

	mutex_unlock(&bpf_stats_enabled_mutex);
	return fd;
}

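/*
 * BPF_ENABLE_STATS with type BPF_STATS_RUN_TIME turns on run_time_ns /
 * run_cnt accounting for all programs via the bpf_stats_enabled static
 * key.  Enabling is fd-scoped: stats stay on until every fd returned
 * here is closed (each close drops the key in bpf_stats_release()
 * above), so concurrent enablers nest naturally.
 *
 * Illustrative (untested) userspace sketch:
 *
 *	union bpf_attr attr = {};
 *	int stats_fd;
 *
 *	attr.enable_stats.type = BPF_STATS_RUN_TIME;
 *	stats_fd = syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
 *	... run workload; read run_time_ns from bpf_prog_info ...
 *	close(stats_fd);
 */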
#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type

static int bpf_enable_stats(union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_ENABLE_STATS))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (attr->enable_stats.type) {
	case BPF_STATS_RUN_TIME:
		return bpf_enable_runtime_stats();
	default:
		break;
	}
	return -EINVAL;
}

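/*
 * BPF_ITER_CREATE: given the fd of an iterator *link* (a BPF_TRACE_ITER
 * program attached via BPF_LINK_CREATE), create the seq_file-backed fd
 * whose read()s actually drive the iterator program.  Sketch of the
 * expected flow (illustrative, untested; error handling elided):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd = iter_prog_fd;
 *	attr.link_create.attach_type = BPF_TRACE_ITER;
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.iter_create.link_fd = link_fd;
 *	iter_fd = syscall(__NR_bpf, BPF_ITER_CREATE, &attr, sizeof(attr));
 *	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
 *		;	(consume formatted iterator output)
 */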
#define BPF_ITER_CREATE_LAST_FIELD iter_create.flags

static int bpf_iter_create(union bpf_attr *attr)
{
	struct bpf_link *link;
	int err;

	if (CHECK_ATTR(BPF_ITER_CREATE))
		return -EINVAL;

	if (attr->iter_create.flags)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	err = bpf_iter_new_fd(link);
	bpf_link_put(link);

	return err;
}

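/*
 * BPF_PROG_BIND_MAP: tie a map's lifetime to a program that does not
 * reference it from its instructions, so the map stays alive for as
 * long as the program does.  Userspace loaders use this for e.g.
 * metadata maps that are only discovered through the program's
 * used-map list.
 */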
#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags

static int bpf_prog_bind_map(union bpf_attr *attr)
{
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct bpf_map **used_maps_old, **used_maps_new;
	int i, ret = 0;

	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
		return -EINVAL;

	if (attr->prog_bind_map.flags)
		return -EINVAL;

	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	map = bpf_map_get(attr->prog_bind_map.map_fd);
	if (IS_ERR(map)) {
		ret = PTR_ERR(map);
		goto out_prog_put;
	}

	mutex_lock(&prog->aux->used_maps_mutex);

	used_maps_old = prog->aux->used_maps;

	/* If the map is already bound, drop the new reference and succeed */
	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (used_maps_old[i] == map) {
			bpf_map_put(map);
			goto out_unlock;
		}

	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
				      sizeof(used_maps_new[0]),
				      GFP_KERNEL);
	if (!used_maps_new) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	memcpy(used_maps_new, used_maps_old,
	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
	used_maps_new[prog->aux->used_map_cnt] = map;

	prog->aux->used_map_cnt++;
	prog->aux->used_maps = used_maps_new;

	kfree(used_maps_old);

out_unlock:
	mutex_unlock(&prog->aux->used_maps_mutex);

	if (ret)
		bpf_map_put(map);
out_prog_put:
	bpf_prog_put(prog);
	return ret;
}

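/*
 * Single entry point for the bpf(2) multiplexer syscall.  The uarg
 * handling makes the ABI extensible in both directions: an older
 * kernel accepts a larger, newer bpf_attr as long as the bytes it does
 * not know about are zero (bpf_check_uarg_tail_zero()), and a newer
 * kernel zero-fills the tail of a shorter, older bpf_attr before use.
 */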
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr;
	int err;

	if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
		return -EPERM;

	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	memset(&attr, 0, sizeof(attr));
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	trace_android_vh_check_bpf_syscall(cmd, &attr, size);

	err = security_bpf(cmd, &attr, size);
	if (err < 0)
		return err;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_MAP_FREEZE:
		err = map_freeze(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr, uattr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
	case BPF_PROG_QUERY:
		err = bpf_prog_query(&attr, uattr);
		break;
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_BTF_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &btf_idr, &btf_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr);
		break;
	case BPF_RAW_TRACEPOINT_OPEN:
		err = bpf_raw_tracepoint_open(&attr);
		break;
	case BPF_BTF_LOAD:
		err = bpf_btf_load(&attr);
		break;
	case BPF_BTF_GET_FD_BY_ID:
		err = bpf_btf_get_fd_by_id(&attr);
		break;
	case BPF_TASK_FD_QUERY:
		err = bpf_task_fd_query(&attr, uattr);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
		err = map_lookup_and_delete_elem(&attr);
		break;
	case BPF_MAP_LOOKUP_BATCH:
		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr,
				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
		break;
	case BPF_MAP_UPDATE_BATCH:
		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
		break;
	case BPF_MAP_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
		break;
	case BPF_LINK_CREATE:
		err = link_create(&attr);
		break;
	case BPF_LINK_UPDATE:
		err = link_update(&attr);
		break;
	case BPF_LINK_GET_FD_BY_ID:
		err = bpf_link_get_fd_by_id(&attr);
		break;
	case BPF_LINK_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &link_idr, &link_idr_lock);
		break;
	case BPF_ENABLE_STATS:
		err = bpf_enable_stats(&attr);
		break;
	case BPF_ITER_CREATE:
		err = bpf_iter_create(&attr);
		break;
	case BPF_LINK_DETACH:
		err = link_detach(&attr);
		break;
	case BPF_PROG_BIND_MAP:
		err = bpf_prog_bind_map(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}