/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * AF_XDP user-space access library.
 *
 * Copyright(c) 2018 - 2019 Intel Corporation.
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */

#ifndef __LIBBPF_XSK_H
#define __LIBBPF_XSK_H

#include <stdio.h>
#include <stdint.h>
#include <linux/if_xdp.h>

#include "libbpf.h"
#include "libbpf_util.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Do not access these members directly. Use the functions below. */
#define DEFINE_XSK_RING(name) \
struct name { \
	__u32 cached_prod; \
	__u32 cached_cons; \
	__u32 mask; \
	__u32 size; \
	__u32 *producer; \
	__u32 *consumer; \
	void *ring; \
	__u32 *flags; \
}

DEFINE_XSK_RING(xsk_ring_prod);
DEFINE_XSK_RING(xsk_ring_cons);

/* For a detailed explanation on the memory barriers associated with the
 * ring, please take a look at net/xdp/xsk_queue.h.
 */

struct xsk_umem;
struct xsk_socket;

static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill,
					      __u32 idx)
{
	__u64 *addrs = (__u64 *)fill->ring;

	return &addrs[idx & fill->mask];
}
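/* Example: populating the fill ring so the kernel has buffers to receive
 * into (a minimal sketch; "fill" is set up by xsk_umem__create() below,
 * and frame_addr is a caller-chosen, frame-aligned offset into the umem):
 *
 *	__u32 idx;
 *
 *	if (xsk_ring_prod__reserve(&fill, 1, &idx) == 1) {
 *		*xsk_ring_prod__fill_addr(&fill, idx) = frame_addr;
 *		xsk_ring_prod__submit(&fill, 1);
 *	}
 */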

static inline const __u64 *
xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx)
{
	const __u64 *addrs = (const __u64 *)comp->ring;

	return &addrs[idx & comp->mask];
}

static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx,
						      __u32 idx)
{
	struct xdp_desc *descs = (struct xdp_desc *)tx->ring;

	return &descs[idx & tx->mask];
}

static inline const struct xdp_desc *
xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
{
	const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;

	return &descs[idx & rx->mask];
}

static inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r)
{
	return *r->flags & XDP_RING_NEED_WAKEUP;
}
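/* Example: waking the kernel when the socket was bound with the
 * XDP_USE_NEED_WAKEUP flag (a sketch; "tx" is a TX ring and "fd" comes
 * from xsk_socket__fd()):
 *
 *	if (xsk_ring_prod__needs_wakeup(&tx))
 *		sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *
 * For the fill ring, a poll() on the socket fd serves the same purpose.
 */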

static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
{
	__u32 free_entries = r->cached_cons - r->cached_prod;

	if (free_entries >= nb)
		return free_entries;

	/* Refresh the local tail pointer.
	 * cached_cons is r->size bigger than the real consumer pointer so
	 * that this addition can be avoided in the more frequently
	 * executed code that computes free_entries at the beginning of
	 * this function. Without this optimization it would have been
	 * free_entries = r->cached_cons - r->cached_prod + r->size.
	 */
	r->cached_cons = *r->consumer + r->size;

	return r->cached_cons - r->cached_prod;
}

static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
{
	__u32 entries = r->cached_prod - r->cached_cons;

	if (entries == 0) {
		r->cached_prod = *r->producer;
		entries = r->cached_prod - r->cached_cons;
	}

	return (entries > nb) ? nb : entries;
}

static inline size_t xsk_ring_prod__reserve(struct xsk_ring_prod *prod,
					    size_t nb, __u32 *idx)
{
	if (xsk_prod_nb_free(prod, nb) < nb)
		return 0;

	*idx = prod->cached_prod;
	prod->cached_prod += nb;

	return nb;
}

static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, size_t nb)
{
	/* Make sure everything has been written to the ring before indicating
	 * this to the kernel by writing the producer pointer.
	 */
	libbpf_smp_wmb();

	*prod->producer += nb;
}
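/* Example: a typical TX path built from reserve/tx_desc/submit (a
 * sketch; frame_addr and frame_len describe a umem frame the caller has
 * already filled with packet data):
 *
 *	__u32 idx;
 *
 *	if (xsk_ring_prod__reserve(&tx, 1, &idx) == 1) {
 *		struct xdp_desc *desc = xsk_ring_prod__tx_desc(&tx, idx);
 *
 *		desc->addr = frame_addr;
 *		desc->len = frame_len;
 *		xsk_ring_prod__submit(&tx, 1);
 *	}
 */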

static inline size_t xsk_ring_cons__peek(struct xsk_ring_cons *cons,
					 size_t nb, __u32 *idx)
{
	size_t entries = xsk_cons_nb_avail(cons, nb);

	if (entries > 0) {
		/* Make sure we do not speculatively read the data before
		 * we have received the packet buffers from the ring.
		 */
		libbpf_smp_rmb();

		*idx = cons->cached_cons;
		cons->cached_cons += entries;
	}

	return entries;
}

static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, size_t nb)
{
	/* Make sure data has been read before indicating we are done
	 * with the entries by updating the consumer pointer.
	 */
	libbpf_smp_rwmb();

	*cons->consumer += nb;
}

static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
{
	return &((char *)umem_area)[addr];
}
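/* Example: a typical RX path built from peek/rx_desc/release (a sketch
 * for the default aligned mode; "umem_area" is the buffer registered
 * with xsk_umem__create(), while BATCH_SIZE and process_packet() are
 * hypothetical caller-side names):
 *
 *	__u32 idx;
 *	size_t i, rcvd = xsk_ring_cons__peek(&rx, BATCH_SIZE, &idx);
 *
 *	for (i = 0; i < rcvd; i++) {
 *		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&rx, idx + i);
 *
 *		process_packet(xsk_umem__get_data(umem_area, desc->addr),
 *			       desc->len);
 *	}
 *	if (rcvd)
 *		xsk_ring_cons__release(&rx, rcvd);
 */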

static inline __u64 xsk_umem__extract_addr(__u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline __u64 xsk_umem__extract_offset(__u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr)
{
	return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr);
}
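/* Example: descriptor address handling in unaligned mode, i.e. when the
 * umem was created with XDP_UMEM_UNALIGNED_CHUNK_FLAG so that addresses
 * carry an offset in their upper bits (a sketch):
 *
 *	__u64 addr = xsk_ring_cons__rx_desc(&rx, idx)->addr;
 *	void *pkt = xsk_umem__get_data(umem_area,
 *				       xsk_umem__add_offset_to_addr(addr));
 *
 * To recycle the frame, put the base address (offset stripped) back on
 * the fill ring:
 *
 *	*xsk_ring_prod__fill_addr(&fill, idx) = xsk_umem__extract_addr(addr);
 */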

LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem);
LIBBPF_API int xsk_socket__fd(const struct xsk_socket *xsk);

#define XSK_RING_CONS__DEFAULT_NUM_DESCS 2048
#define XSK_RING_PROD__DEFAULT_NUM_DESCS 2048
#define XSK_UMEM__DEFAULT_FRAME_SHIFT 12 /* 4096 bytes */
#define XSK_UMEM__DEFAULT_FRAME_SIZE (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT)
#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0
#define XSK_UMEM__DEFAULT_FLAGS 0

struct xsk_umem_config {
	__u32 fill_size;
	__u32 comp_size;
	__u32 frame_size;
	__u32 frame_headroom;
	__u32 flags;
};

/* Flags for the libbpf_flags field. */
#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0)

struct xsk_socket_config {
	__u32 rx_size;
	__u32 tx_size;
	__u32 libbpf_flags;
	__u32 xdp_flags;
	__u16 bind_flags;
};

/* Set config to NULL to get the default configuration. */
LIBBPF_API int xsk_umem__create(struct xsk_umem **umem,
				void *umem_area, __u64 size,
				struct xsk_ring_prod *fill,
				struct xsk_ring_cons *comp,
				const struct xsk_umem_config *config);
LIBBPF_API int xsk_umem__create_v0_0_2(struct xsk_umem **umem,
				       void *umem_area, __u64 size,
				       struct xsk_ring_prod *fill,
				       struct xsk_ring_cons *comp,
				       const struct xsk_umem_config *config);
LIBBPF_API int xsk_umem__create_v0_0_4(struct xsk_umem **umem,
				       void *umem_area, __u64 size,
				       struct xsk_ring_prod *fill,
				       struct xsk_ring_cons *comp,
				       const struct xsk_umem_config *config);
LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk,
				  const char *ifname, __u32 queue_id,
				  struct xsk_umem *umem,
				  struct xsk_ring_cons *rx,
				  struct xsk_ring_prod *tx,
				  const struct xsk_socket_config *config);
LIBBPF_API int
xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
			  const char *ifname,
			  __u32 queue_id, struct xsk_umem *umem,
			  struct xsk_ring_cons *rx,
			  struct xsk_ring_prod *tx,
			  struct xsk_ring_prod *fill,
			  struct xsk_ring_cons *comp,
			  const struct xsk_socket_config *config);
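/* Example: minimal bring-up using the default configs, i.e. NULL for
 * both config arguments (a sketch; error handling omitted, NUM_FRAMES
 * is caller-chosen, and queue 0 of "eth0" is assumed):
 *
 *	struct xsk_ring_prod fill, tx;
 *	struct xsk_ring_cons comp, rx;
 *	struct xsk_umem *umem;
 *	struct xsk_socket *xsk;
 *	size_t size = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
 *	void *area = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	xsk_umem__create(&umem, area, size, &fill, &comp, NULL);
 *	xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL);
 */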

/* Returns 0 for success and -EBUSY if the umem is still in use. */
LIBBPF_API int xsk_umem__delete(struct xsk_umem *umem);
LIBBPF_API void xsk_socket__delete(struct xsk_socket *xsk);
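/* Teardown order matters: delete every socket that uses the umem before
 * deleting the umem itself, otherwise xsk_umem__delete() returns -EBUSY:
 *
 *	xsk_socket__delete(xsk);
 *	xsk_umem__delete(umem);
 */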

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __LIBBPF_XSK_H */