Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Ring buffer operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 2020 Facebook, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #ifndef _GNU_SOURCE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #define _GNU_SOURCE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <stdlib.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <stdio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <unistd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/bpf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <asm/barrier.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <sys/mman.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <sys/epoll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include "libbpf.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include "libbpf_internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include "bpf.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) struct ring {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 	ring_buffer_sample_fn sample_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 	void *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 	void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 	unsigned long *consumer_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 	unsigned long *producer_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 	unsigned long mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 	int map_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) struct ring_buffer {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	struct epoll_event *events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 	struct ring *rings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 	size_t page_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	int epoll_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 	int ring_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) static void ringbuf_unmap_ring(struct ring_buffer *rb, struct ring *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	if (r->consumer_pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 		munmap(r->consumer_pos, rb->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 		r->consumer_pos = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 	if (r->producer_pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 		munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 		r->producer_pos = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) /* Add extra RINGBUF maps to this ring buffer manager */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) int ring_buffer__add(struct ring_buffer *rb, int map_fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 		     ring_buffer_sample_fn sample_cb, void *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	struct bpf_map_info info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	__u32 len = sizeof(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	struct epoll_event *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	struct ring *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	void *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	memset(&info, 0, sizeof(info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	err = bpf_obj_get_info_by_fd(map_fd, &info, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 		pr_warn("ringbuf: failed to get map info for fd=%d: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 			map_fd, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	if (info.type != BPF_MAP_TYPE_RINGBUF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 		pr_warn("ringbuf: map fd=%d is not BPF_MAP_TYPE_RINGBUF\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 			map_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	rb->rings = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	rb->events = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	r = &rb->rings[rb->ring_cnt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	memset(r, 0, sizeof(*r));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	r->map_fd = map_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 	r->sample_cb = sample_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 	r->ctx = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	r->mask = info.max_entries - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	/* Map writable consumer page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 		   map_fd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	if (tmp == MAP_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 		pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 			map_fd, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	r->consumer_pos = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	/* Map read-only producer page and data pages. We map twice as big
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 	 * data size to allow simple reading of samples that wrap around the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 	 * end of a ring buffer. See kernel implementation for details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	 * */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	tmp = mmap(NULL, rb->page_size + 2 * info.max_entries, PROT_READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 		   MAP_SHARED, map_fd, rb->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	if (tmp == MAP_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 		ringbuf_unmap_ring(rb, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 		pr_warn("ringbuf: failed to mmap data pages for map fd=%d: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 			map_fd, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	r->producer_pos = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	r->data = tmp + rb->page_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	e = &rb->events[rb->ring_cnt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	memset(e, 0, sizeof(*e));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	e->events = EPOLLIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	e->data.fd = rb->ring_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, e) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 		ringbuf_unmap_ring(rb, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 		pr_warn("ringbuf: failed to epoll add map fd=%d: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 			map_fd, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 	rb->ring_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) void ring_buffer__free(struct ring_buffer *rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	if (!rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	for (i = 0; i < rb->ring_cnt; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 		ringbuf_unmap_ring(rb, &rb->rings[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	if (rb->epoll_fd >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 		close(rb->epoll_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	free(rb->events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	free(rb->rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	free(rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) struct ring_buffer *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 		 const struct ring_buffer_opts *opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	struct ring_buffer *rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	if (!OPTS_VALID(opts, ring_buffer_opts))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	rb = calloc(1, sizeof(*rb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 	if (!rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	rb->page_size = getpagesize();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	if (rb->epoll_fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 		err = -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 		pr_warn("ringbuf: failed to create epoll instance: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 	err = ring_buffer__add(rb, map_fd, sample_cb, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	return rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 	ring_buffer__free(rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) static inline int roundup_len(__u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 	/* clear out top 2 bits (discard and busy, if set) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 	len <<= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 	len >>= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 	/* add length prefix */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 	len += BPF_RINGBUF_HDR_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	/* round up to 8 byte alignment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 	return (len + 7) / 8 * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) static int64_t ringbuf_process_ring(struct ring* r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 	int *len_ptr, len, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	/* 64-bit to avoid overflow in case of extreme application behavior */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 	int64_t cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	unsigned long cons_pos, prod_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	bool got_new_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	void *sample;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	cons_pos = smp_load_acquire(r->consumer_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 		got_new_data = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 		prod_pos = smp_load_acquire(r->producer_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 		while (cons_pos < prod_pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 			len_ptr = r->data + (cons_pos & r->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 			len = smp_load_acquire(len_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 			/* sample not committed yet, bail out for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 			if (len & BPF_RINGBUF_BUSY_BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 				goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 			got_new_data = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 			cons_pos += roundup_len(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 			if ((len & BPF_RINGBUF_DISCARD_BIT) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 				sample = (void *)len_ptr + BPF_RINGBUF_HDR_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 				err = r->sample_cb(r->ctx, sample, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 				if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 					/* update consumer pos and bail out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 					smp_store_release(r->consumer_pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 							  cons_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 					return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 				cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 			smp_store_release(r->consumer_pos, cons_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	} while (got_new_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 	return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) /* Consume available ring buffer(s) data without event polling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)  * Returns number of records consumed across all registered ring buffers (or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)  * INT_MAX, whichever is less), or negative number if any of the callbacks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)  * return error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) int ring_buffer__consume(struct ring_buffer *rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 	int64_t err, res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 	for (i = 0; i < rb->ring_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 		struct ring *ring = &rb->rings[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 		err = ringbuf_process_ring(ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 		res += err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	if (res > INT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 		return INT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) /* Poll for available data and consume records, if any are available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)  * Returns number of records consumed (or INT_MAX, whichever is less), or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)  * negative number, if any of the registered callbacks returned error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 	int i, cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 	int64_t err, res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 	cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	if (cnt < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 		return -errno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	for (i = 0; i < cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 		__u32 ring_id = rb->events[i].data.fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 		struct ring *ring = &rb->rings[ring_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 		err = ringbuf_process_ring(ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 		res += err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 	if (res > INT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 		return INT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) }