/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

struct bpf_offload_dev {
	const struct bpf_prog_offload_ops *ops;
	struct list_head netdevs;
	void *priv;
};

struct bpf_offload_netdev {
	struct rhash_head l;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	struct list_head progs;
	struct list_head maps;
	struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof(struct net_device *),
	.key_offset		= offsetof(struct bpf_offload_netdev, netdev),
	.head_offset		= offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking	= true,
};

static struct rhashtable offdevs;
static bool offdevs_inited;

static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	if (!offdevs_inited)
		return NULL;
	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

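/* Called on the BPF_PROG_LOAD path when userspace sets prog_ifindex.
 * Binds the program being loaded to an offload-capable netdev in the
 * caller's netns. The reference taken by dev_get_by_index() is dropped
 * again before returning; the netdev pointer stays usable because
 * bpf_offload_dev_netdev_unregister() migrates or destroys all bound
 * objects under bpf_devs_lock before the device goes away.
 */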
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;

	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
					   attr->prog_ifindex);
	err = bpf_dev_offload_check(offload->netdev);
	if (err)
		goto err_maybe_put;

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(offload->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}
	offload->offdev = ondev->offdev;
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);
	dev_put(offload->netdev);
	up_write(&bpf_devs_lock);

	return 0;
err_unlock:
	up_write(&bpf_devs_lock);
err_maybe_put:
	if (offload->netdev)
		dev_put(offload->netdev);
	kfree(offload);
	return err;
}

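/* Ask the backing driver to allocate its verifier state for @prog.
 * dev_state records whether ->prepare() succeeded, so that ->destroy()
 * is only invoked on teardown if the driver actually holds state.
 */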
int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload) {
		ret = offload->offdev->ops->prepare(prog);
		offload->dev_state = !ret;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

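/* Per-instruction verifier callback; gives the driver a chance to run
 * its own checks as the core verifier walks the program.
 */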
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->insn_hook(env, insn_idx,
						      prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

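/* Called once main verification is done. ->finalize() is optional;
 * drivers which don't implement it get a success return by default.
 */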
int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (offload->offdev->ops->finalize)
			ret = offload->offdev->ops->finalize(env);
		else
			ret = 0;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

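/* Instruction rewrite/removal hooks used by the verifier's dead code
 * elimination. These are best-effort: a failure is latched in
 * opt_failed and further optimizations are skipped for this program,
 * but verification itself is not aborted.
 */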
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn)
{
	const struct bpf_prog_offload_ops *ops;
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		ops = offload->offdev->ops;
		if (!offload->opt_failed && ops->replace_insn)
			ret = ops->replace_insn(env, off, insn);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (!offload->opt_failed && offload->offdev->ops->remove_insns)
			ret = offload->offdev->ops->remove_insns(env, off, cnt);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

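/* Must be called with bpf_devs_lock held for writing. Frees driver
 * state (if ->prepare() had succeeded), hides the program's ID from
 * BPF_PROG_GET_NEXT_ID and unlinks it from its bpf_offload_netdev.
 */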
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;

	if (offload->dev_state)
		offload->offdev->ops->destroy(prog);

	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
	bpf_prog_free_id(prog, true);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	down_write(&bpf_devs_lock);
	if (prog->aux->offload)
		__bpf_prog_offload_destroy(prog);
	up_write(&bpf_devs_lock);
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->translate(prog);
	up_read(&bpf_devs_lock);

	return ret;
}

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

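/* "JIT" step for device-bound programs: point the host entry at a
 * warning stub (such programs must only ever run on the device) and
 * ask the driver to translate the verified instructions for its
 * hardware.
 */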
int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

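/* Fill the offload-specific fields of bpf_prog_info. ns_get_path_cb()
 * runs the callback above to capture ifindex and the owning netns
 * atomically; netns_dev/netns_ino then let userspace resolve which
 * namespace that ifindex belongs to.
 */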
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	int res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

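/* Forward a map offload request to the device driver via ->ndo_bpf().
 * The caller must hold RTNL and guarantee that offmap->netdev is still
 * valid.
 */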
static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

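/* Allocate a device-resident map (BPF_MAP_CREATE with map_ifindex).
 * Only array and hash maps can be offloaded, and CAP_SYS_ADMIN is
 * required. __dev_get_by_index() takes no reference; the pointer is
 * safe here because RTNL is held across the ndo call and the netdev
 * unregister path clears offmap->netdev under bpf_devs_lock.
 */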
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offload_netdev *ondev;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = kzalloc(sizeof(*offmap), GFP_USER);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	kfree(offmap);
	return ERR_PTR(err);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map, true);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

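/* Map teardown. The device copy may already have been destroyed by
 * bpf_offload_dev_netdev_unregister(), in which case offmap->netdev
 * is NULL and only the host-side shell is freed here.
 */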
void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	kfree(offmap);
}

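/* Map element accessors simply proxy to the driver's dev_ops under
 * bpf_devs_lock; once the device is gone (offmap->netdev cleared)
 * they all return -ENODEV.
 */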
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}

struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap	= map_to_offmap(map),
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	int res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

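/* A program "matches" @netdev if it is bound to that netdev itself,
 * or if both netdevs are ports of the same underlying offload device
 * (same struct bpf_offload_dev). Caller holds bpf_devs_lock.
 */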
static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_dev_bound(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}

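/* Register @netdev as one of the ports of @offdev. Drivers call this
 * for every netdev that should accept offloaded programs and maps.
 */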
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	down_write(&bpf_devs_lock);
	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_unlock_free;
	}

	list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	up_write(&bpf_devs_lock);
	return 0;

err_unlock_free:
	up_write(&bpf_devs_lock);
	kfree(ondev);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

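/* Tear down the offload state of @netdev. If the parent offload
 * device has another registered netdev, bound programs and maps are
 * migrated to it; otherwise their device state is destroyed in place
 * (userspace still holds the FDs, but the objects become unusable).
 */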
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		goto unlock;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
	list_del(&ondev->offdev_netdevs);

	/* Try to move the objects to another netdev of the device */
	altdev = list_first_entry_or_null(&offdev->netdevs,
					  struct bpf_offload_netdev,
					  offdev_netdevs);
	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
unlock:
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

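/* Create the driver-side handle which groups all netdevs of one
 * offload-capable device. The offdevs rhashtable is initialized
 * lazily on the first call. A typical driver flow (sketch only,
 * the "foo_*" names are hypothetical):
 *
 *	bdev = bpf_offload_dev_create(&foo_bpf_dev_ops, foo_priv);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	err = bpf_offload_dev_netdev_register(bdev, netdev);
 *	...
 *	bpf_offload_dev_netdev_unregister(bdev, netdev);
 *	bpf_offload_dev_destroy(bdev);
 */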
struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
	struct bpf_offload_dev *offdev;
	int err;

	down_write(&bpf_devs_lock);
	if (!offdevs_inited) {
		err = rhashtable_init(&offdevs, &offdevs_params);
		if (err) {
			up_write(&bpf_devs_lock);
			return ERR_PTR(err);
		}
		offdevs_inited = true;
	}
	up_write(&bpf_devs_lock);

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	offdev->priv = priv;
	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);

void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
{
	return offdev->priv;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);