Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/irq_work.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/kmemleak.h>
#include <uapi/linux/btf.h>

#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)

/* non-mmap()'able part of bpf_ringbuf (everything up to consumer page) */
#define RINGBUF_PGOFF \
	(offsetof(struct bpf_ringbuf, consumer_pos) >> PAGE_SHIFT)
/* consumer page and producer page */
#define RINGBUF_POS_PAGES 2

#define RINGBUF_MAX_RECORD_SZ (UINT_MAX/4)

/* The maximum size of the ring buffer area is limited by the 32-bit page
 * offset within the record header, counted in pages. Reserve 8 bits for
 * extensibility, and take into account a few extra pages for the
 * consumer/producer pages and the non-mmap()'able parts. This gives a 64GB
 * limit, which seems plenty for a single ring buffer.
 */
#define RINGBUF_MAX_DATA_SZ \
	(((1ULL << 24) - RINGBUF_POS_PAGES - RINGBUF_PGOFF) * PAGE_SIZE)
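
/* Worked example, assuming 4 KiB pages: 2^24 pages * 4096 bytes = 64 GiB,
 * minus the handful of meta pages reserved above, so the usable data area
 * tops out just below 64 GiB.
 */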

struct bpf_ringbuf {
	wait_queue_head_t waitq;
	struct irq_work work;
	u64 mask;
	struct page **pages;
	int nr_pages;
	spinlock_t spinlock ____cacheline_aligned_in_smp;
	/* Consumer and producer counters are put into separate pages to allow
	 * mapping the consumer page as r/w while restricting the producer
	 * page to r/o. This protects the producer position from being
	 * modified by a user-space application, which would ruin in-kernel
	 * position tracking.
	 */
	unsigned long consumer_pos __aligned(PAGE_SIZE);
	unsigned long producer_pos __aligned(PAGE_SIZE);
	char data[] __aligned(PAGE_SIZE);
};
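
/* Note: ringbuf_map_alloc() below enforces that the data area size is a
 * power of 2, so "mask" is always data_sz - 1 and logical positions wrap
 * with a plain "pos & rb->mask"; e.g. data_sz == 4096 gives mask == 0xfff.
 */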

struct bpf_ringbuf_map {
	struct bpf_map map;
	struct bpf_map_memory memory;
	struct bpf_ringbuf *rb;
};

/* 8-byte ring buffer record header structure */
struct bpf_ringbuf_hdr {
	u32 len;
	u32 pg_off;
};
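
/* While a record is in flight, the top two bits of "len" double as flags:
 * BPF_RINGBUF_BUSY_BIT (bit 31) marks a reserved-but-not-yet-committed
 * record and BPF_RINGBUF_DISCARD_BIT (bit 30) marks a discarded one; both
 * are defined in <linux/bpf.h> alongside BPF_RINGBUF_HDR_SZ.
 */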

static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
{
	const gfp_t flags = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN |
			    __GFP_ZERO;
	int nr_meta_pages = RINGBUF_PGOFF + RINGBUF_POS_PAGES;
	int nr_data_pages = data_sz >> PAGE_SHIFT;
	int nr_pages = nr_meta_pages + nr_data_pages;
	struct page **pages, *page;
	struct bpf_ringbuf *rb;
	size_t array_size;
	int i;

	/* Each data page is mapped twice to allow "virtual"
	 * continuous read of samples wrapping around the end of ring
	 * buffer area:
	 * ------------------------------------------------------
	 * | meta pages |  real data pages  |  same data pages  |
	 * ------------------------------------------------------
	 * |            | 1 2 3 4 5 6 7 8 9 | 1 2 3 4 5 6 7 8 9 |
	 * ------------------------------------------------------
	 * |            | TA             DA | TA             DA |
	 * ------------------------------------------------------
	 *                               ^^^^^^^
	 *                                  |
	 * Here, no need to worry about special handling of wrapped-around
	 * data due to double-mapped data pages. This works both in kernel and
	 * when mmap()'ed in user-space, simplifying both kernel and
	 * user-space implementations significantly.
	 */
	array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages);
	if (array_size > PAGE_SIZE)
		pages = vmalloc_node(array_size, numa_node);
	else
		pages = kmalloc_node(array_size, flags, numa_node);
	if (!pages)
		return NULL;

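	/* Double-mapping index math by example: assuming nr_meta_pages == 3
	 * (i.e. RINGBUF_PGOFF == 1) and nr_data_pages == 9 as in the diagram
	 * above, data page i (3 <= i < 12) is stored at both pages[i] and
	 * pages[9 + i], so the vmap() below lays the nine data pages out
	 * twice, back to back, right after the meta pages.
	 */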
	for (i = 0; i < nr_pages; i++) {
		page = alloc_pages_node(numa_node, flags, 0);
		if (!page) {
			nr_pages = i;
			goto err_free_pages;
		}
		pages[i] = page;
		if (i >= nr_meta_pages)
			pages[nr_data_pages + i] = page;
	}

	rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
		  VM_MAP | VM_USERMAP, PAGE_KERNEL);
	if (rb) {
		kmemleak_not_leak(pages);
		rb->pages = pages;
		rb->nr_pages = nr_pages;
		return rb;
	}

err_free_pages:
	for (i = 0; i < nr_pages; i++)
		__free_page(pages[i]);
	kvfree(pages);
	return NULL;
}

static void bpf_ringbuf_notify(struct irq_work *work)
{
	struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work);

	wake_up_all(&rb->waitq);
}

static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
{
	struct bpf_ringbuf *rb;

	rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
	if (!rb)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&rb->spinlock);
	init_waitqueue_head(&rb->waitq);
	init_irq_work(&rb->work, bpf_ringbuf_notify);

	rb->mask = data_sz - 1;
	rb->consumer_pos = 0;
	rb->producer_pos = 0;

	return rb;
}

static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
{
	struct bpf_ringbuf_map *rb_map;
	u64 cost;
	int err;

	if (attr->map_flags & ~RINGBUF_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	if (attr->key_size || attr->value_size ||
	    !is_power_of_2(attr->max_entries) ||
	    !PAGE_ALIGNED(attr->max_entries))
		return ERR_PTR(-EINVAL);

#ifdef CONFIG_64BIT
	/* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */
	if (attr->max_entries > RINGBUF_MAX_DATA_SZ)
		return ERR_PTR(-E2BIG);
#endif

	rb_map = kzalloc(sizeof(*rb_map), GFP_USER);
	if (!rb_map)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&rb_map->map, attr);

	cost = sizeof(struct bpf_ringbuf_map) +
	       sizeof(struct bpf_ringbuf) +
	       attr->max_entries;
	err = bpf_map_charge_init(&rb_map->map.memory, cost);
	if (err)
		goto err_free_map;

	rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node);
	if (IS_ERR(rb_map->rb)) {
		err = PTR_ERR(rb_map->rb);
		goto err_uncharge;
	}

	return &rb_map->map;

err_uncharge:
	bpf_map_charge_finish(&rb_map->map.memory);
err_free_map:
	kfree(rb_map);
	return ERR_PTR(err);
}
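
/* User-space view of the constraints enforced above (a hedged sketch; the
 * libbpf call is only illustrative): key_size and value_size must be 0, and
 * max_entries is the data area size in bytes, a page-aligned power of 2:
 *
 *	fd = bpf_create_map(BPF_MAP_TYPE_RINGBUF, 0, 0, 4096 * 64, 0);
 */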

static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
{
	/* copy pages pointer and nr_pages to local variable, as we are going
	 * to unmap rb itself with vunmap() below
	 */
	struct page **pages = rb->pages;
	int i, nr_pages = rb->nr_pages;

	vunmap(rb);
	for (i = 0; i < nr_pages; i++)
		__free_page(pages[i]);
	kvfree(pages);
}

static void ringbuf_map_free(struct bpf_map *map)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	bpf_ringbuf_free(rb_map->rb);
	kfree(rb_map);
}

static void *ringbuf_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-ENOTSUPP);
}

static int ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value,
				   u64 flags)
{
	return -ENOTSUPP;
}

static int ringbuf_map_delete_elem(struct bpf_map *map, void *key)
{
	return -ENOTSUPP;
}

static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
				    void *next_key)
{
	return -ENOTSUPP;
}

static int ringbuf_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);

	if (vma->vm_flags & VM_WRITE) {
		/* allow writable mapping for the consumer_pos only */
		if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EPERM;
	} else {
		vma->vm_flags &= ~VM_MAYWRITE;
	}
	/* remap_vmalloc_range() checks size and offset constraints */
	return remap_vmalloc_range(vma, rb_map->rb,
				   vma->vm_pgoff + RINGBUF_PGOFF);
}
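
/* A hedged sketch of how a consumer typically maps this (libbpf does the
 * equivalent): the consumer page read-write at page offset 0, then the
 * producer page plus the double-mapped data area read-only from page
 * offset 1:
 *
 *	cons = mmap(NULL, page_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    map_fd, 0);
 *	prod = mmap(NULL, page_sz + 2 * data_sz, PROT_READ, MAP_SHARED,
 *		    map_fd, page_sz);
 */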

static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb)
{
	unsigned long cons_pos, prod_pos;

	cons_pos = smp_load_acquire(&rb->consumer_pos);
	prod_pos = smp_load_acquire(&rb->producer_pos);
	return prod_pos - cons_pos;
}

static __poll_t ringbuf_map_poll(struct bpf_map *map, struct file *filp,
				 struct poll_table_struct *pts)
{
	struct bpf_ringbuf_map *rb_map;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	poll_wait(filp, &rb_map->rb->waitq, pts);

	if (ringbuf_avail_data_sz(rb_map->rb))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

static int ringbuf_map_btf_id;
const struct bpf_map_ops ringbuf_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = ringbuf_map_alloc,
	.map_free = ringbuf_map_free,
	.map_mmap = ringbuf_map_mmap,
	.map_poll = ringbuf_map_poll,
	.map_lookup_elem = ringbuf_map_lookup_elem,
	.map_update_elem = ringbuf_map_update_elem,
	.map_delete_elem = ringbuf_map_delete_elem,
	.map_get_next_key = ringbuf_map_get_next_key,
	.map_btf_name = "bpf_ringbuf_map",
	.map_btf_id = &ringbuf_map_btf_id,
};

/* Given a pointer to ring buffer record metadata and the struct bpf_ringbuf
 * itself, calculate the offset of the record metadata from the start of the
 * ring buffer, in pages, rounded down. This page offset is stored as part of
 * the record metadata and makes it possible to restore the struct
 * bpf_ringbuf * from a record pointer. The page offset is stored at offset 4
 * of the record metadata header.
 */
static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb,
				     struct bpf_ringbuf_hdr *hdr)
{
	return ((void *)hdr - (void *)rb) >> PAGE_SHIFT;
}

/* Given a pointer to a ring buffer record header, restore the pointer to the
 * struct bpf_ringbuf itself by using the page offset stored at offset 4.
 */
static struct bpf_ringbuf *
bpf_ringbuf_restore_from_rec(struct bpf_ringbuf_hdr *hdr)
{
	unsigned long addr = (unsigned long)(void *)hdr;
	unsigned long off = (unsigned long)hdr->pg_off << PAGE_SHIFT;

	return (void *)((addr & PAGE_MASK) - off);
}
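
/* Round-trip example, assuming 4 KiB pages: a record header at
 * 0xffff888005403010 inside a bpf_ringbuf mapped at 0xffff888005400000 gets
 * pg_off == 0x3010 >> 12 == 3; restoring masks off the low 12 bits
 * (-> ...3000) and subtracts 3 pages, returning 0xffff888005400000.
 */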

static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
{
	unsigned long cons_pos, prod_pos, new_prod_pos, flags;
	u32 len, pg_off;
	struct bpf_ringbuf_hdr *hdr;

	if (unlikely(size > RINGBUF_MAX_RECORD_SZ))
		return NULL;

	len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
	if (len > rb->mask + 1)
		return NULL;

	cons_pos = smp_load_acquire(&rb->consumer_pos);

	if (in_nmi()) {
		if (!spin_trylock_irqsave(&rb->spinlock, flags))
			return NULL;
	} else {
		spin_lock_irqsave(&rb->spinlock, flags);
	}

	prod_pos = rb->producer_pos;
	new_prod_pos = prod_pos + len;

	/* check for out of ringbuf space by ensuring producer position
	 * doesn't advance more than (ringbuf_size - 1) ahead
	 */
	if (new_prod_pos - cons_pos > rb->mask) {
		spin_unlock_irqrestore(&rb->spinlock, flags);
		return NULL;
	}

	hdr = (void *)rb->data + (prod_pos & rb->mask);
	pg_off = bpf_ringbuf_rec_pg_off(rb, hdr);
	hdr->len = size | BPF_RINGBUF_BUSY_BIT;
	hdr->pg_off = pg_off;

	/* pairs with consumer's smp_load_acquire() */
	smp_store_release(&rb->producer_pos, new_prod_pos);

	spin_unlock_irqrestore(&rb->spinlock, flags);

	return (void *)hdr + BPF_RINGBUF_HDR_SZ;
}
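
/* Example of the space check above: with an 8 KiB ring (mask == 8191),
 * cons_pos == 0 and prod_pos == 4096, reserving a record with len == 4096
 * makes new_prod_pos - cons_pos == 8192 > 8191, so the reservation fails;
 * once the consumer advances, the same reservation succeeds.
 */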

BPF_CALL_3(bpf_ringbuf_reserve, struct bpf_map *, map, u64, size, u64, flags)
{
	struct bpf_ringbuf_map *rb_map;

	if (unlikely(flags))
		return 0;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size);
}

const struct bpf_func_proto bpf_ringbuf_reserve_proto = {
	.func		= bpf_ringbuf_reserve,
	.ret_type	= RET_PTR_TO_ALLOC_MEM_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
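
/* Hedged usage sketch from the BPF program side ("rb" map and "struct event"
 * are made up for illustration):
 *
 *	struct event *e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
 *	if (!e)
 *		return 0;
 *	e->pid = bpf_get_current_pid_tgid() >> 32;
 *	bpf_ringbuf_submit(e, 0);
 */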

static void bpf_ringbuf_commit(void *sample, u64 flags, bool discard)
{
	unsigned long rec_pos, cons_pos;
	struct bpf_ringbuf_hdr *hdr;
	struct bpf_ringbuf *rb;
	u32 new_len;

	hdr = sample - BPF_RINGBUF_HDR_SZ;
	rb = bpf_ringbuf_restore_from_rec(hdr);
	new_len = hdr->len ^ BPF_RINGBUF_BUSY_BIT;
	if (discard)
		new_len |= BPF_RINGBUF_DISCARD_BIT;

	/* update record header with correct final size prefix */
	xchg(&hdr->len, new_len);

	/* if consumer caught up and is waiting for our record, notify about
	 * new data availability
	 */
	rec_pos = (void *)hdr - (void *)rb->data;
	cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;

	if (flags & BPF_RB_FORCE_WAKEUP)
		irq_work_queue(&rb->work);
	else if (cons_pos == rec_pos && !(flags & BPF_RB_NO_WAKEUP))
		irq_work_queue(&rb->work);
}
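
/* With flags == 0 the wakeup is thus sent only when the consumer had fully
 * caught up and this record is the next one it will see, which amortizes
 * irq_work overhead; BPF_RB_FORCE_WAKEUP and BPF_RB_NO_WAKEUP override this
 * heuristic in either direction.
 */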

BPF_CALL_2(bpf_ringbuf_submit, void *, sample, u64, flags)
{
	bpf_ringbuf_commit(sample, flags, false /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_submit_proto = {
	.func		= bpf_ringbuf_submit,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_ALLOC_MEM,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_discard, void *, sample, u64, flags)
{
	bpf_ringbuf_commit(sample, flags, true /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_discard_proto = {
	.func		= bpf_ringbuf_discard,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_ALLOC_MEM,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_ringbuf_output, struct bpf_map *, map, void *, data, u64, size,
	   u64, flags)
{
	struct bpf_ringbuf_map *rb_map;
	void *rec;

	if (unlikely(flags & ~(BPF_RB_NO_WAKEUP | BPF_RB_FORCE_WAKEUP)))
		return -EINVAL;

	rb_map = container_of(map, struct bpf_ringbuf_map, map);
	rec = __bpf_ringbuf_reserve(rb_map->rb, size);
	if (!rec)
		return -EAGAIN;

	memcpy(rec, data, size);
	bpf_ringbuf_commit(rec, flags, false /* discard */);
	return 0;
}

const struct bpf_func_proto bpf_ringbuf_output_proto = {
	.func		= bpf_ringbuf_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
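
/* Note the trade-off versus reserve/commit: bpf_ringbuf_output() pays for an
 * extra memcpy() of the sample, but lets the program prepare the sample in
 * its own memory first; reserve/commit writes into the ring directly and
 * avoids the copy.
 */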

BPF_CALL_2(bpf_ringbuf_query, struct bpf_map *, map, u64, flags)
{
	struct bpf_ringbuf *rb;

	rb = container_of(map, struct bpf_ringbuf_map, map)->rb;

	switch (flags) {
	case BPF_RB_AVAIL_DATA:
		return ringbuf_avail_data_sz(rb);
	case BPF_RB_RING_SIZE:
		return rb->mask + 1;
	case BPF_RB_CONS_POS:
		return smp_load_acquire(&rb->consumer_pos);
	case BPF_RB_PROD_POS:
		return smp_load_acquire(&rb->producer_pos);
	default:
		return 0;
	}
}

const struct bpf_func_proto bpf_ringbuf_query_proto = {
	.func		= bpf_ringbuf_query,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
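
/* All four BPF_RB_* queries above return momentary, inherently racy
 * snapshots; e.g. bpf_ringbuf_query(&rb, BPF_RB_AVAIL_DATA) reports the
 * unconsumed byte count only as of the instant it executed.
 */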