Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 tree for the Orange Pi 5/5B/5+ boards. The listing below is the XSKMAP (AF_XDP socket map) implementation, net/xdp/xskmap.c.

// SPDX-License-Identifier: GPL-2.0
/* XSKMAP used for AF_XDP sockets
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/bpf.h>
#include <linux/capability.h>
#include <net/xdp_sock.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "xsk.h"

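/* Map refcounting: every node linking a socket into the map holds a
 * reference on the underlying bpf_map, keeping the map alive for as long
 * as sockets point into it.
 */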
int xsk_map_inc(struct xsk_map *map)
{
	bpf_map_inc(&map->map);
	return 0;
}

void xsk_map_put(struct xsk_map *map)
{
	bpf_map_put(&map->map);
}

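/* A node records which slot (map_entry) a socket occupies, so the entry
 * can be found again at socket teardown. Allocation pins the map via
 * xsk_map_inc(); xsk_map_node_free() drops that reference.
 */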
static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
					       struct xdp_sock **map_entry)
{
	struct xsk_map_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_ATOMIC | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	err = xsk_map_inc(map);
	if (err) {
		kfree(node);
		return ERR_PTR(err);
	}

	node->map = map;
	node->map_entry = map_entry;
	return node;
}

static void xsk_map_node_free(struct xsk_map_node *node)
{
	xsk_map_put(node->map);
	kfree(node);
}

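/* Each xdp_sock keeps a list of all map slots it is installed in,
 * protected by xs->map_list_lock, so its entries can be cleared when the
 * socket goes away.
 */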
static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
{
	spin_lock_bh(&xs->map_list_lock);
	list_add_tail(&node->node, &xs->map_list);
	spin_unlock_bh(&xs->map_list_lock);
}

static void xsk_map_sock_delete(struct xdp_sock *xs,
				struct xdp_sock **map_entry)
{
	struct xsk_map_node *n, *tmp;

	spin_lock_bh(&xs->map_list_lock);
	list_for_each_entry_safe(n, tmp, &xs->map_list, node) {
		if (map_entry == n->map_entry) {
			list_del(&n->node);
			xsk_map_node_free(n);
		}
	}
	spin_unlock_bh(&xs->map_list_lock);
}

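/* Create an XSKMAP: a flat, fixed-size array of xdp_sock pointers.
 * Keys and values are both 4 bytes (key: u32 index, value: u32 socket fd).
 */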
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
	struct bpf_map_memory mem;
	int err, numa_node;
	struct xsk_map *m;
	u64 size;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 ||
	    attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
		return ERR_PTR(-EINVAL);

	numa_node = bpf_map_attr_numa_node(attr);
	size = struct_size(m, xsk_map, attr->max_entries);

	err = bpf_map_charge_init(&mem, size);
	if (err < 0)
		return ERR_PTR(err);

	m = bpf_map_area_alloc(size, numa_node);
	if (!m) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	bpf_map_init_from_attr(&m->map, attr);
	bpf_map_charge_move(&m->map.memory, &mem);
	spin_lock_init(&m->lock);

	return &m->map;
}

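/* Tear down the map. bpf_clear_redirect_map() and synchronize_net()
 * ensure no datapath user still dereferences the map before its backing
 * memory is released.
 */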
static void xsk_map_free(struct bpf_map *map)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);

	bpf_clear_redirect_map(map);
	synchronize_net();
	bpf_map_area_free(m);
}

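/* Iteration callback for userspace traversal: a NULL or out-of-range key
 * restarts at index 0, and the last valid index reports -ENOENT.
 */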
static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= m->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == m->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

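/* Emit an inline lookup for BPF programs; the verifier patches these
 * instructions in place of a helper call. Only the pointer width matters
 * for the shift and the load, so sizeof(struct xsk_sock *) is equivalent
 * to sizeof(struct xdp_sock *).
 */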
static int xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
	struct bpf_insn *insn = insn_buf;

	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xsk_sock *)));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map));
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp);
	*insn++ = BPF_LDX_MEM(BPF_SIZEOF(struct xsk_sock *), ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

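/* Lookups from BPF programs must run under RCU. Syscall-side lookups are
 * rejected below: a kernel socket pointer cannot be handed to userspace.
 */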
static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __xsk_map_lookup_elem(map, *(u32 *)key);
}

static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

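/* Install the socket whose fd is in *value at index *key. The slot is
 * published with WRITE_ONCE() under m->lock; a socket that gets replaced
 * is unlinked from its list node for this slot.
 */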
static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs, *old_xs, **map_entry;
	u32 i = *(u32 *)key, fd = *(u32 *)value;
	struct xsk_map_node *node;
	struct socket *sock;
	int err;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= m->map.max_entries))
		return -E2BIG;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return err;

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return -EOPNOTSUPP;
	}

	xs = (struct xdp_sock *)sock->sk;

	map_entry = &m->xsk_map[i];
	node = xsk_map_node_alloc(m, map_entry);
	if (IS_ERR(node)) {
		sockfd_put(sock);
		return PTR_ERR(node);
	}

	spin_lock_bh(&m->lock);
	old_xs = READ_ONCE(*map_entry);
	if (old_xs == xs) {
		err = 0;
		goto out;
	} else if (old_xs && map_flags == BPF_NOEXIST) {
		err = -EEXIST;
		goto out;
	} else if (!old_xs && map_flags == BPF_EXIST) {
		err = -ENOENT;
		goto out;
	}
	xsk_map_sock_add(xs, node);
	WRITE_ONCE(*map_entry, xs);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	return 0;

out:
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	xsk_map_node_free(node);
	return err;
}

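/* Clear the slot at *key: xchg() removes the pointer atomically, then the
 * old socket's list node for this slot is freed.
 */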
static int xsk_map_delete_elem(struct bpf_map *map, void *key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *old_xs, **map_entry;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	spin_lock_bh(&m->lock);
	map_entry = &m->xsk_map[k];
	old_xs = xchg(map_entry, NULL);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);

	return 0;
}

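/* Socket-teardown path: clear the slot only if it still points at this
 * socket, so a concurrent update of the same index is not undone.
 */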
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry)
{
	spin_lock_bh(&map->lock);
	if (READ_ONCE(*map_entry) == xs) {
		WRITE_ONCE(*map_entry, NULL);
		xsk_map_sock_delete(xs, map_entry);
	}
	spin_unlock_bh(&map->lock);
}

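/* When an XSKMAP is used as the inner map of a map-in-map, it must also
 * match on max_entries, on top of the generic metadata checks.
 */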
static bool xsk_map_meta_equal(const struct bpf_map *meta0,
			       const struct bpf_map *meta1)
{
	return meta0->max_entries == meta1->max_entries &&
		bpf_map_meta_equal(meta0, meta1);
}

static int xsk_map_btf_id;
const struct bpf_map_ops xsk_map_ops = {
	.map_meta_equal = xsk_map_meta_equal,
	.map_alloc = xsk_map_alloc,
	.map_free = xsk_map_free,
	.map_get_next_key = xsk_map_get_next_key,
	.map_lookup_elem = xsk_map_lookup_elem,
	.map_gen_lookup = xsk_map_gen_lookup,
	.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
	.map_update_elem = xsk_map_update_elem,
	.map_delete_elem = xsk_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "xsk_map",
	.map_btf_id = &xsk_map_btf_id,
};
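
For context, a minimal userspace sketch of how this map type is driven through the syscall interface above. It assumes a modern libbpf (bpf_map_create() appeared in libbpf 0.7; 5.10-era code used bpf_create_map()) and an AF_XDP socket that is already set up and bound; the function names and the one-slot-per-RX-queue layout are illustrative, not part of this kernel tree. The datapath counterpart is a BPF program calling bpf_redirect_map() on the same map.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <bpf/bpf.h>	/* libbpf syscall wrappers */

/* Create an XSKMAP with room for 64 sockets. key_size and value_size must
 * both be 4, exactly as xsk_map_alloc() above enforces; creation also
 * requires CAP_NET_ADMIN. */
static int create_xsks_map(void)
{
	return bpf_map_create(BPF_MAP_TYPE_XSKMAP, "xsks_map",
			      sizeof(uint32_t), sizeof(uint32_t), 64, NULL);
}

/* Install an already-bound AF_XDP socket (e.g. one set up with libxdp's
 * xsk_socket__create()) into the slot for its RX queue. This lands in
 * xsk_map_update_elem() above, which resolves the fd to a struct xdp_sock
 * and publishes it under the map lock. */
static int install_xsk(int map_fd, uint32_t queue_id, int xsk_fd)
{
	uint32_t value = (uint32_t)xsk_fd;	/* the map value is the fd */

	if (bpf_map_update_elem(map_fd, &queue_id, &value, BPF_ANY)) {
		fprintf(stderr, "XSKMAP update: %s\n", strerror(errno));
		return -1;
	}
	return 0;
}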