// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include "percpu_freelist.h"

#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

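/*
 * Both map types share one backing store: a circular buffer of
 * max_entries + 1 fixed-size slots. head indexes the next slot to be
 * written, tail the oldest element; head == tail means empty, and one
 * slot stays unused so that a full buffer remains distinguishable from
 * an empty one. The raw spinlock serializes head/tail updates with IRQs
 * disabled, as the push/pop/peek callbacks below may be invoked from
 * BPF programs running in interrupt or tracing context.
 */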
struct bpf_queue_stack {
	struct bpf_map map;
	raw_spinlock_t lock;
	u32 head, tail;
	u32 size; /* max_entries + 1 */

	char elements[] __aligned(8);
};

static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}
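
/*
 * Worked example of the full/empty convention: with max_entries = 3 the
 * buffer has size = 4. From head = tail = 0 (empty), three pushes leave
 * head = 3; a fourth would advance head to 4, which wraps to 0 == tail,
 * so the map reports full after max_entries insertions and only the one
 * reserved slot goes unused.
 */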

/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	if (!bpf_capable())
		return -EPERM;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* If value_size is bigger, user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
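
/*
 * For illustration only, not part of this file: a minimal user-space
 * sketch of creating a queue map that passes the checks above, using
 * libbpf's bpf_map_create() wrapper. The map name, value size and
 * max_entries are arbitrary choices; key_size must be 0 and value_size
 * non-zero for queue and stack maps.
 *
 *	int fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, "example_queue",
 *				0, sizeof(__u64), 128, NULL);
 *	if (fd < 0)
 *		err(1, "queue map creation rejected");
 */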

static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map_memory mem = {0};
	struct bpf_queue_stack *qs;
	u64 size, queue_size, cost;

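	/* Size the allocation in u64 so (max_entries + 1) * value_size
	 * cannot wrap; bpf_map_charge_init() then rejects costs too
	 * large to account against the memlock limit.
	 */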
	size = (u64) attr->max_entries + 1;
	cost = queue_size = sizeof(*qs) + size * attr->value_size;

	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
		return ERR_PTR(ret);

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	memset(qs, 0, sizeof(*qs));

	bpf_map_init_from_attr(&qs->map, attr);

	bpf_map_charge_move(&qs->map.memory, &mem);
	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

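	/* The map header and elements array live in a single contiguous
	 * allocation (elements[] is a flexible array member), so one
	 * bpf_map_area_free() releases everything.
	 */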
	bpf_map_area_free(qs);
}

static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

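	/* head is the next free slot, so the top of the stack sits at
	 * head - 1. When head == 0 the u32 subtraction wraps to U32_MAX;
	 * the bounds check below folds that back to the last slot,
	 * size - 1.
	 */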
	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

/* Called from syscall or from eBPF program */
static int queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
				     u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in
	 * case the map is full
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps: BPF_ANY == 0,
	 * BPF_NOEXIST == 1 and BPF_EXIST == 2, so only BPF_ANY and
	 * BPF_EXIST are accepted here.
	 */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	raw_spin_lock_irqsave(&qs->lock, irq_flags);

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}
		/* advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}
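
/*
 * For illustration only, not part of this file: a hypothetical BPF
 * program fragment driving the callbacks above through the
 * bpf_map_push_elem(), bpf_map_peek_elem() and bpf_map_pop_elem()
 * helpers. "queue" stands for an assumed queue map definition.
 *
 *	__u64 val = 42;
 *
 *	// fails with -E2BIG once the queue is full:
 *	bpf_map_push_elem(&queue, &val, BPF_ANY);
 *	// overwrites the oldest element instead of failing:
 *	bpf_map_push_elem(&queue, &val, BPF_EXIST);
 *	// copy out the oldest element without removing it:
 *	bpf_map_peek_elem(&queue, &val);
 *	// copy out and remove:
 *	bpf_map_pop_elem(&queue, &val);
 */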

/* Called from syscall or from eBPF program */
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called from syscall */
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}

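/*
 * Queue and stack maps are keyless, so the generic lookup, update,
 * delete and get_next_key operations above simply fail; elements are
 * reached only via push/pop/peek. The two ops tables below are
 * identical except for their pop and peek callbacks.
 */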
static int queue_map_btf_id;
const struct bpf_map_ops queue_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_name = "bpf_queue_stack",
	.map_btf_id = &queue_map_btf_id,
};

static int stack_map_btf_id;
const struct bpf_map_ops stack_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_name = "bpf_queue_stack",
	.map_btf_id = &stack_map_btf_id,
};