^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /* Copyright(c) 2020 Intel Corporation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) #define _GNU_SOURCE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <poll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <pthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <stdio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <stdlib.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <sys/mman.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <sys/resource.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <sys/socket.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <sys/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <time.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <unistd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <getopt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <netinet/ether.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <net/if.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/bpf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/if_link.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/if_xdp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <bpf/libbpf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <bpf/xsk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <bpf/bpf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
/* Element count of a true array (invalid on pointers/decayed parameters). */
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

/* Kernel-style shorthand aliases for the fixed-width unsigned types. */
typedef __u64 u64;
typedef __u32 u32;
typedef __u16 u16;
typedef __u8 u8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) /* This program illustrates the packet forwarding between multiple AF_XDP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * sockets in multi-threaded environment. All threads are sharing a common
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * buffer pool, with each socket having its own private buffer cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * Example 1: Single thread handling two sockets. The packets received by socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * A (interface IFA, queue QA) are forwarded to socket B (interface IFB, queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * QB), while the packets received by socket B are forwarded to socket A. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * thread is running on CPU core X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * ./xsk_fwd -i IFA -q QA -i IFB -q QB -c X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * Example 2: Two threads, each handling two sockets. The thread running on CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) * core X forwards all the packets received by socket A to socket B, and all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) * packets received by socket B to socket A. The thread running on CPU core Y is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) * performing the same packet forwarding between sockets C and D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) * ./xsk_fwd -i IFA -q QA -i IFB -q QB -i IFC -q QC -i IFD -q QD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) * -c CX -c CY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * Buffer pool and buffer cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * For packet forwarding, the packet buffers are typically allocated from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * pool for packet reception and freed back to the pool for further reuse once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) * the packet transmission is completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * The buffer pool is shared between multiple threads. In order to minimize the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) * access latency to the shared buffer pool, each thread creates one (or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * several) buffer caches, which, unlike the buffer pool, are private to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) * thread that creates them and therefore cannot be shared with other threads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) * The access to the shared pool is only needed either (A) when the cache gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) * empty due to repeated buffer allocations and it needs to be replenished from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * the pool, or (B) when the cache gets full due to repeated buffer free and it
 * needs to be flushed back to the pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * In a packet forwarding system, a packet received on any input port can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * potentially be transmitted on any output port, depending on the forwarding
 * configuration. For AF_XDP sockets, for this to work with zero-copy of the
 * packet buffers, it is required that the buffer pool memory fits into the
 * UMEM area shared by all the sockets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
/* User-supplied configuration for a buffer pool. */
struct bpool_params {
	u32 n_buffers;		/* Total buffer count; rounded up to a slab multiple by bpool_init(). */
	u32 buffer_size;	/* Size of each buffer in bytes. */
	int mmap_flags;		/* Extra flags ORed into the buffer memory mmap() call. */

	u32 n_users_max;	/* Max number of buffer caches; two reserved slabs per cache. */
	u32 n_buffers_per_slab;	/* Number of buffer pointers held by one slab. */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
/* This buffer pool implementation organizes the buffers into equally sized
 * slabs of *n_buffers_per_slab*. Initially, there are *n_slabs* slabs in the
 * pool that are completely filled with buffer pointers (full slabs).
 *
 * Each buffer cache has a slab for buffer allocation and a slab for buffer
 * free, with both of these slabs initially empty. When the cache's allocation
 * slab goes empty, it is swapped with one of the available full slabs from the
 * pool, if any is available. When the cache's free slab goes full, it is
 * swapped for one of the empty slabs from the pool, which is guaranteed to
 * succeed.
 *
 * Partially filled slabs never get traded between the cache and the pool
 * (except when the cache itself is destroyed), which enables fast operation
 * through pointer swapping.
 */
struct bpool {
	struct bpool_params params;	/* Copy of init params (n_buffers rounded up). */
	pthread_mutex_t lock;		/* Protects the slab stacks and counters below. */
	void *addr;			/* mmap()'ed buffer memory, used as the UMEM area. */

	u64 **slabs;			/* Regular slab stack; entries [0, n_slabs_available) are full. */
	u64 **slabs_reserved;		/* Stack of slabs reserved for the caches' cons/prod slabs. */
	u64 *buffers;			/* Backing storage for the regular slabs' buffer pointers. */
	u64 *buffers_reserved;		/* Backing storage for the reserved slabs' buffer pointers. */

	u64 n_slabs;			/* Total number of regular slabs. */
	u64 n_slabs_reserved;		/* Total number of reserved slabs (2 * n_users_max). */
	u64 n_buffers;			/* Total number of buffers in the pool. */

	u64 n_slabs_available;		/* Count of full regular slabs currently in the pool. */
	u64 n_slabs_reserved_available;	/* Count of reserved slabs not yet handed to a cache. */

	struct xsk_umem_config umem_cfg;	/* Copy of the UMEM configuration. */
	struct xsk_ring_prod umem_fq;		/* UMEM fill queue ring. */
	struct xsk_ring_cons umem_cq;		/* UMEM completion queue ring. */
	struct xsk_umem *umem;			/* UMEM handle shared by all sockets. */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) static struct bpool *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) bpool_init(struct bpool_params *params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) struct xsk_umem_config *umem_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) u64 n_slabs, n_slabs_reserved, n_buffers, n_buffers_reserved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) u64 slabs_size, slabs_reserved_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) u64 buffers_size, buffers_reserved_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) u64 total_size, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) struct bpool *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) u8 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) /* mmap prep. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) if (setrlimit(RLIMIT_MEMLOCK, &r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) /* bpool internals dimensioning. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) n_slabs = (params->n_buffers + params->n_buffers_per_slab - 1) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) params->n_buffers_per_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) n_slabs_reserved = params->n_users_max * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) n_buffers = n_slabs * params->n_buffers_per_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) n_buffers_reserved = n_slabs_reserved * params->n_buffers_per_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) slabs_size = n_slabs * sizeof(u64 *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) slabs_reserved_size = n_slabs_reserved * sizeof(u64 *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) buffers_size = n_buffers * sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) buffers_reserved_size = n_buffers_reserved * sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) total_size = sizeof(struct bpool) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) slabs_size + slabs_reserved_size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) buffers_size + buffers_reserved_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) /* bpool memory allocation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) p = calloc(total_size, sizeof(u8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) /* bpool memory initialization. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) bp = (struct bpool *)p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) memcpy(&bp->params, params, sizeof(*params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) bp->params.n_buffers = n_buffers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) bp->slabs = (u64 **)&p[sizeof(struct bpool)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) bp->slabs_reserved = (u64 **)&p[sizeof(struct bpool) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) slabs_size];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) bp->buffers = (u64 *)&p[sizeof(struct bpool) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) slabs_size + slabs_reserved_size];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) bp->buffers_reserved = (u64 *)&p[sizeof(struct bpool) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) slabs_size + slabs_reserved_size + buffers_size];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) bp->n_slabs = n_slabs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) bp->n_slabs_reserved = n_slabs_reserved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) bp->n_buffers = n_buffers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) for (i = 0; i < n_slabs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) bp->slabs[i] = &bp->buffers[i * params->n_buffers_per_slab];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) bp->n_slabs_available = n_slabs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) for (i = 0; i < n_slabs_reserved; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) bp->slabs_reserved[i] = &bp->buffers_reserved[i *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) params->n_buffers_per_slab];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) bp->n_slabs_reserved_available = n_slabs_reserved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) for (i = 0; i < n_buffers; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) bp->buffers[i] = i * params->buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) /* lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) status = pthread_mutex_init(&bp->lock, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) /* mmap. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) bp->addr = mmap(NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) n_buffers * params->buffer_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) PROT_READ | PROT_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) MAP_PRIVATE | MAP_ANONYMOUS | params->mmap_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) if (bp->addr == MAP_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) pthread_mutex_destroy(&bp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) /* umem. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) status = xsk_umem__create(&bp->umem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) bp->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) bp->params.n_buffers * bp->params.buffer_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) &bp->umem_fq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) &bp->umem_cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) umem_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) munmap(bp->addr, bp->params.n_buffers * bp->params.buffer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) pthread_mutex_destroy(&bp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) memcpy(&bp->umem_cfg, umem_cfg, sizeof(*umem_cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) return bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
/* Tear down a pool created by bpool_init(): delete the UMEM, unmap the
 * buffer memory, destroy the lock, and free the bookkeeping memory.
 * Teardown order mirrors init order in reverse. Safe to call with NULL.
 */
static void
bpool_free(struct bpool *bp)
{
	if (!bp)
		return;

	xsk_umem__delete(bp->umem);
	munmap(bp->addr, bp->params.n_buffers * bp->params.buffer_size);
	pthread_mutex_destroy(&bp->lock);
	free(bp);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
/* Per-thread buffer cache: a consumer slab to allocate buffers from and a
 * producer slab to free buffers into, both claimed from the pool's reserved
 * slabs. Private to the owning thread, so no locking on the fast path.
 */
struct bcache {
	struct bpool *bp;	/* Backing shared buffer pool. */

	u64 *slab_cons;		/* Slab buffers are allocated (consumed) from. */
	u64 *slab_prod;		/* Slab freed (produced) buffers are stored into. */

	u64 n_buffers_cons;	/* Number of buffers currently in slab_cons. */
	u64 n_buffers_prod;	/* Number of buffers currently in slab_prod. */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) static u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) bcache_slab_size(struct bcache *bc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) struct bpool *bp = bc->bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) return bp->params.n_buffers_per_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) static struct bcache *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) bcache_init(struct bpool *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) struct bcache *bc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) bc = calloc(1, sizeof(struct bcache));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) if (!bc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) bc->bp = bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) bc->n_buffers_cons = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) bc->n_buffers_prod = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) pthread_mutex_lock(&bp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) if (bp->n_slabs_reserved_available == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) pthread_mutex_unlock(&bp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) free(bc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) bc->slab_cons = bp->slabs_reserved[bp->n_slabs_reserved_available - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) bc->slab_prod = bp->slabs_reserved[bp->n_slabs_reserved_available - 2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) bp->n_slabs_reserved_available -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) pthread_mutex_unlock(&bp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) return bc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) bcache_free(struct bcache *bc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) struct bpool *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) if (!bc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) /* In order to keep this example simple, the case of freeing any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) * existing buffers from the cache back to the pool is ignored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) bp = bc->bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) pthread_mutex_lock(&bp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) bp->slabs_reserved[bp->n_slabs_reserved_available] = bc->slab_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) bp->slabs_reserved[bp->n_slabs_reserved_available + 1] = bc->slab_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) bp->n_slabs_reserved_available += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) pthread_mutex_unlock(&bp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) free(bc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) /* To work correctly, the implementation requires that the *n_buffers* input
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) * argument is never greater than the buffer pool's *n_buffers_per_slab*. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) * is typically the case, with one exception taking place when large number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) * buffers are allocated at init time (e.g. for the UMEM fill queue setup).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) static inline u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) bcache_cons_check(struct bcache *bc, u32 n_buffers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) struct bpool *bp = bc->bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) u64 n_buffers_per_slab = bp->params.n_buffers_per_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) u64 n_buffers_cons = bc->n_buffers_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) u64 n_slabs_available;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) u64 *slab_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) * Consumer slab is not empty: Use what's available locally. Do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) * look for more buffers from the pool when the ask can only be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) * partially satisfied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) if (n_buffers_cons)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) return (n_buffers_cons < n_buffers) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) n_buffers_cons :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) n_buffers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) * Consumer slab is empty: look to trade the current consumer slab
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) * (full) for a full slab from the pool, if any is available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) pthread_mutex_lock(&bp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) n_slabs_available = bp->n_slabs_available;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) if (!n_slabs_available) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) pthread_mutex_unlock(&bp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) n_slabs_available--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) slab_full = bp->slabs[n_slabs_available];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) bp->slabs[n_slabs_available] = bc->slab_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) bp->n_slabs_available = n_slabs_available;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) pthread_mutex_unlock(&bp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) bc->slab_cons = slab_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) bc->n_buffers_cons = n_buffers_per_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) return n_buffers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) static inline u64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) bcache_cons(struct bcache *bc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) u64 n_buffers_cons = bc->n_buffers_cons - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) u64 buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) buffer = bc->slab_cons[n_buffers_cons];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) bc->n_buffers_cons = n_buffers_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) return buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) bcache_prod(struct bcache *bc, u64 buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) struct bpool *bp = bc->bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) u64 n_buffers_per_slab = bp->params.n_buffers_per_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) u64 n_buffers_prod = bc->n_buffers_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) u64 n_slabs_available;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) u64 *slab_empty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) * Producer slab is not yet full: store the current buffer to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) if (n_buffers_prod < n_buffers_per_slab) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) bc->slab_prod[n_buffers_prod] = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) bc->n_buffers_prod = n_buffers_prod + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) * Producer slab is full: trade the cache's current producer slab
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) * (full) for an empty slab from the pool, then store the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) * buffer to the new producer slab. As one full slab exists in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) * cache, it is guaranteed that there is at least one empty slab
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) * available in the pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) pthread_mutex_lock(&bp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) n_slabs_available = bp->n_slabs_available;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) slab_empty = bp->slabs[n_slabs_available];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) bp->slabs[n_slabs_available] = bc->slab_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) bp->n_slabs_available = n_slabs_available + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) pthread_mutex_unlock(&bp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) slab_empty[0] = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) bc->slab_prod = slab_empty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) bc->n_buffers_prod = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
/*
 * Port
 *
 * Each of the forwarding ports sits on top of an AF_XDP socket. In order for
 * packet forwarding to happen with no packet buffer copy, all the sockets need
 * to share the same UMEM area, which is used as the buffer pool memory.
 */

/* Max packets read from a socket's RX queue in one burst (overridable). */
#ifndef MAX_BURST_RX
#define MAX_BURST_RX 64
#endif

/* Max packets written to a socket's TX queue in one burst (overridable). */
#ifndef MAX_BURST_TX
#define MAX_BURST_TX 64
#endif

/* A burst of received packets. */
struct burst_rx {
	u64 addr[MAX_BURST_RX];	/* Per-packet buffer address (byte offset in the UMEM area). */
	u32 len[MAX_BURST_RX];	/* Per-packet length in bytes. */
};

/* A burst of packets staged for transmission. */
struct burst_tx {
	u64 addr[MAX_BURST_TX];	/* Per-packet buffer address (byte offset in the UMEM area). */
	u32 len[MAX_BURST_TX];	/* Per-packet length in bytes. */
	u32 n_pkts;		/* Number of valid entries in addr[]/len[]. */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
/* Configuration for one forwarding port (one AF_XDP socket). */
struct port_params {
	struct xsk_socket_config xsk_cfg;	/* AF_XDP socket configuration. */
	struct bpool *bp;			/* Shared buffer pool backing this port. */
	const char *iface;			/* Network interface name. */
	u32 iface_queue;			/* Interface queue the socket binds to. */
};

/* Run-time state of one forwarding port. */
struct port {
	struct port_params params;	/* Copy of the init-time parameters. */

	struct bcache *bc;		/* This port's private buffer cache. */

	struct xsk_ring_cons rxq;	/* Socket RX ring. */
	struct xsk_ring_prod txq;	/* Socket TX ring. */
	struct xsk_ring_prod umem_fq;	/* UMEM fill queue ring used by this port. */
	struct xsk_ring_cons umem_cq;	/* UMEM completion queue ring used by this port. */
	struct xsk_socket *xsk;		/* AF_XDP socket handle. */
	int umem_fq_initialized;	/* Presumably set once the fill queue is populated — confirm in port_init(). */

	u64 n_pkts_rx;			/* Total packets received on this port. */
	u64 n_pkts_tx;			/* Total packets transmitted from this port. */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) port_free(struct port *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) /* To keep this example simple, the code to free the buffers from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) * socket's receive and transmit queues, as well as from the UMEM fill
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) * and completion queues, is not included.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) if (p->xsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) xsk_socket__delete(p->xsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) bcache_free(p->bc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) static struct port *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) port_init(struct port_params *params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) struct port *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) u32 umem_fq_size, pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) int status, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) /* Memory allocation and initialization. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) p = calloc(sizeof(struct port), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) memcpy(&p->params, params, sizeof(p->params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) umem_fq_size = params->bp->umem_cfg.fill_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) /* bcache. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) p->bc = bcache_init(params->bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) if (!p->bc ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) (bcache_slab_size(p->bc) < umem_fq_size) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) (bcache_cons_check(p->bc, umem_fq_size) < umem_fq_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) port_free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) /* xsk socket. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) status = xsk_socket__create_shared(&p->xsk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) params->iface,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) params->iface_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) params->bp->umem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) &p->rxq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) &p->txq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) &p->umem_fq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) &p->umem_cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) ¶ms->xsk_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) port_free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) /* umem fq. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) xsk_ring_prod__reserve(&p->umem_fq, umem_fq_size, &pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) for (i = 0; i < umem_fq_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) *xsk_ring_prod__fill_addr(&p->umem_fq, pos + i) =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) bcache_cons(p->bc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) xsk_ring_prod__submit(&p->umem_fq, umem_fq_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) p->umem_fq_initialized = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) static inline u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) port_rx_burst(struct port *p, struct burst_rx *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) u32 n_pkts, pos, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) /* Free buffers for FQ replenish. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) n_pkts = ARRAY_SIZE(b->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) n_pkts = bcache_cons_check(p->bc, n_pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) if (!n_pkts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) /* RXQ. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) n_pkts = xsk_ring_cons__peek(&p->rxq, n_pkts, &pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) if (!n_pkts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) if (xsk_ring_prod__needs_wakeup(&p->umem_fq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) struct pollfd pollfd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) .fd = xsk_socket__fd(p->xsk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) .events = POLLIN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) poll(&pollfd, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) for (i = 0; i < n_pkts; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) b->addr[i] = xsk_ring_cons__rx_desc(&p->rxq, pos + i)->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) b->len[i] = xsk_ring_cons__rx_desc(&p->rxq, pos + i)->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) xsk_ring_cons__release(&p->rxq, n_pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) p->n_pkts_rx += n_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) /* UMEM FQ. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) for ( ; ; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) status = xsk_ring_prod__reserve(&p->umem_fq, n_pkts, &pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) if (status == n_pkts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) if (xsk_ring_prod__needs_wakeup(&p->umem_fq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) struct pollfd pollfd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) .fd = xsk_socket__fd(p->xsk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) .events = POLLIN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) poll(&pollfd, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) for (i = 0; i < n_pkts; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) *xsk_ring_prod__fill_addr(&p->umem_fq, pos + i) =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) bcache_cons(p->bc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) xsk_ring_prod__submit(&p->umem_fq, n_pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) return n_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) port_tx_burst(struct port *p, struct burst_tx *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) u32 n_pkts, pos, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) /* UMEM CQ. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) n_pkts = p->params.bp->umem_cfg.comp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) n_pkts = xsk_ring_cons__peek(&p->umem_cq, n_pkts, &pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) for (i = 0; i < n_pkts; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) u64 addr = *xsk_ring_cons__comp_addr(&p->umem_cq, pos + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) bcache_prod(p->bc, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) xsk_ring_cons__release(&p->umem_cq, n_pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) /* TXQ. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) n_pkts = b->n_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) for ( ; ; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) status = xsk_ring_prod__reserve(&p->txq, n_pkts, &pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) if (status == n_pkts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) if (xsk_ring_prod__needs_wakeup(&p->txq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) sendto(xsk_socket__fd(p->xsk), NULL, 0, MSG_DONTWAIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) for (i = 0; i < n_pkts; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) xsk_ring_prod__tx_desc(&p->txq, pos + i)->addr = b->addr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) xsk_ring_prod__tx_desc(&p->txq, pos + i)->len = b->len[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) xsk_ring_prod__submit(&p->txq, n_pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) if (xsk_ring_prod__needs_wakeup(&p->txq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) sendto(xsk_socket__fd(p->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) p->n_pkts_tx += n_pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) * Thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) * Packet forwarding threads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) #ifndef MAX_PORTS_PER_THREAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) #define MAX_PORTS_PER_THREAD 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) struct thread_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) struct port *ports_rx[MAX_PORTS_PER_THREAD];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) struct port *ports_tx[MAX_PORTS_PER_THREAD];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) u32 n_ports_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) struct burst_rx burst_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) struct burst_tx burst_tx[MAX_PORTS_PER_THREAD];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) u32 cpu_core_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) int quit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) static void swap_mac_addresses(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) struct ether_header *eth = (struct ether_header *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) struct ether_addr *src_addr = (struct ether_addr *)ð->ether_shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) struct ether_addr *dst_addr = (struct ether_addr *)ð->ether_dhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) struct ether_addr tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) tmp = *src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) *src_addr = *dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) *dst_addr = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) static void *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) thread_func(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) struct thread_data *t = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) cpu_set_t cpu_cores;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) CPU_ZERO(&cpu_cores);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) CPU_SET(t->cpu_core_id, &cpu_cores);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpu_cores);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) for (i = 0; !t->quit; i = (i + 1) & (t->n_ports_rx - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) struct port *port_rx = t->ports_rx[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) struct port *port_tx = t->ports_tx[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) struct burst_rx *brx = &t->burst_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) struct burst_tx *btx = &t->burst_tx[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) u32 n_pkts, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) /* RX. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) n_pkts = port_rx_burst(port_rx, brx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) if (!n_pkts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) /* Process & TX. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) for (j = 0; j < n_pkts; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) u64 addr = xsk_umem__add_offset_to_addr(brx->addr[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) u8 *pkt = xsk_umem__get_data(port_rx->params.bp->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) swap_mac_addresses(pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) btx->addr[btx->n_pkts] = brx->addr[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) btx->len[btx->n_pkts] = brx->len[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) btx->n_pkts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) if (btx->n_pkts == MAX_BURST_TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) port_tx_burst(port_tx, btx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) btx->n_pkts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) * Process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) static const struct bpool_params bpool_params_default = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) .n_buffers = 64 * 1024,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) .buffer_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) .mmap_flags = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) .n_users_max = 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) .n_buffers_per_slab = XSK_RING_PROD__DEFAULT_NUM_DESCS * 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) static const struct xsk_umem_config umem_cfg_default = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS * 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) .frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) .flags = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) static const struct port_params port_params_default = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) .xsk_cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) .rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) .tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) .libbpf_flags = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) .xdp_flags = XDP_FLAGS_DRV_MODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) .bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) .bp = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) .iface = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) .iface_queue = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) #ifndef MAX_PORTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) #define MAX_PORTS 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) #ifndef MAX_THREADS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) #define MAX_THREADS 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) static struct bpool_params bpool_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) static struct xsk_umem_config umem_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) static struct bpool *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) static struct port_params port_params[MAX_PORTS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) static struct port *ports[MAX_PORTS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) static u64 n_pkts_rx[MAX_PORTS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) static u64 n_pkts_tx[MAX_PORTS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) static int n_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) static pthread_t threads[MAX_THREADS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) static struct thread_data thread_data[MAX_THREADS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) static int n_threads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) print_usage(char *prog_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) const char *usage =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) "Usage:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) "\t%s [ -b SIZE ] -c CORE -i INTERFACE [ -q QUEUE ]\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) "\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) "-c CORE CPU core to run a packet forwarding thread\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) " on. May be invoked multiple times.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) "\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) "-b SIZE Number of buffers in the buffer pool shared\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) " by all the forwarding threads. Default: %u.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) "\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) "-i INTERFACE Network interface. Each (INTERFACE, QUEUE)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) " pair specifies one forwarding port. May be\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) " invoked multiple times.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) "\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) "-q QUEUE Network interface queue for RX and TX. Each\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) " (INTERFACE, QUEUE) pair specified one\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) " forwarding port. Default: %u. May be invoked\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) " multiple times.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) "\n";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) printf(usage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) prog_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) bpool_params_default.n_buffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) port_params_default.iface_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) parse_args(int argc, char **argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) struct option lgopts[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) { NULL, 0, 0, 0 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) int opt, option_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /* Parse the input arguments. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) for ( ; ;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) opt = getopt_long(argc, argv, "c:i:q:", lgopts, &option_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (opt == EOF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) switch (opt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) case 'b':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) bpool_params.n_buffers = atoi(optarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) case 'c':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (n_threads == MAX_THREADS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) printf("Max number of threads (%d) reached.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) MAX_THREADS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) thread_data[n_threads].cpu_core_id = atoi(optarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) n_threads++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) case 'i':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (n_ports == MAX_PORTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) printf("Max number of ports (%d) reached.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) MAX_PORTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) port_params[n_ports].iface = optarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) port_params[n_ports].iface_queue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) n_ports++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) case 'q':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (n_ports == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) printf("No port specified for queue.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) port_params[n_ports - 1].iface_queue = atoi(optarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) printf("Illegal argument.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) optind = 1; /* reset getopt lib */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /* Check the input arguments. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (!n_ports) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) printf("No ports specified.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (!n_threads) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) printf("No threads specified.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (n_ports % n_threads) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) printf("Ports cannot be evenly distributed to threads.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) print_port(u32 port_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct port *port = ports[port_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) printf("Port %u: interface = %s, queue = %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) port_id, port->params.iface, port->params.iface_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) print_thread(u32 thread_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct thread_data *t = &thread_data[thread_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) printf("Thread %u (CPU core %u): ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) thread_id, t->cpu_core_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) for (i = 0; i < t->n_ports_rx; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) struct port *port_rx = t->ports_rx[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) struct port *port_tx = t->ports_tx[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) printf("(%s, %u) -> (%s, %u), ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) port_rx->params.iface,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) port_rx->params.iface_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) port_tx->params.iface,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) port_tx->params.iface_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) printf("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) print_port_stats_separator(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) printf("+-%4s-+-%12s-+-%13s-+-%12s-+-%13s-+\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) "----",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) "------------",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) "-------------",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) "------------",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) "-------------");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) print_port_stats_header(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) print_port_stats_separator();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) printf("| %4s | %12s | %13s | %12s | %13s |\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) "Port",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) "RX packets",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) "RX rate (pps)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) "TX packets",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) "TX_rate (pps)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) print_port_stats_separator();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) print_port_stats_trailer(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) print_port_stats_separator();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) printf("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) print_port_stats(int port_id, u64 ns_diff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct port *p = ports[port_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) double rx_pps, tx_pps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) rx_pps = (p->n_pkts_rx - n_pkts_rx[port_id]) * 1000000000. / ns_diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) tx_pps = (p->n_pkts_tx - n_pkts_tx[port_id]) * 1000000000. / ns_diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) printf("| %4d | %12llu | %13.0f | %12llu | %13.0f |\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) port_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) p->n_pkts_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) rx_pps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) p->n_pkts_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) tx_pps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) n_pkts_rx[port_id] = p->n_pkts_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) n_pkts_tx[port_id] = p->n_pkts_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) print_port_stats_all(u64 ns_diff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) print_port_stats_header();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) for (i = 0; i < n_ports; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) print_port_stats(i, ns_diff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) print_port_stats_trailer();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) static int quit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) signal_handler(int sig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) quit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) static void remove_xdp_program(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) for (i = 0 ; i < n_ports; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) bpf_set_link_xdp_fd(if_nametoindex(port_params[i].iface), -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) port_params[i].xsk_cfg.xdp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) int main(int argc, char **argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) struct timespec time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) u64 ns0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /* Parse args. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) memcpy(&bpool_params, &bpool_params_default,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) sizeof(struct bpool_params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) memcpy(&umem_cfg, &umem_cfg_default,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) sizeof(struct xsk_umem_config));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) for (i = 0; i < MAX_PORTS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) memcpy(&port_params[i], &port_params_default,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) sizeof(struct port_params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (parse_args(argc, argv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) print_usage(argv[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) /* Buffer pool initialization. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) bp = bpool_init(&bpool_params, &umem_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (!bp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) printf("Buffer pool initialization failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) printf("Buffer pool created successfully.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) /* Ports initialization. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) for (i = 0; i < MAX_PORTS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) port_params[i].bp = bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) for (i = 0; i < n_ports; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) ports[i] = port_init(&port_params[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (!ports[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) printf("Port %d initialization failed.\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) print_port(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) printf("All ports created successfully.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) /* Threads. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) for (i = 0; i < n_threads; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) struct thread_data *t = &thread_data[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) u32 n_ports_per_thread = n_ports / n_threads, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) for (j = 0; j < n_ports_per_thread; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) t->ports_rx[j] = ports[i * n_ports_per_thread + j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) t->ports_tx[j] = ports[i * n_ports_per_thread +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) (j + 1) % n_ports_per_thread];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) t->n_ports_rx = n_ports_per_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) print_thread(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) for (i = 0; i < n_threads; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) status = pthread_create(&threads[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) thread_func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) &thread_data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) printf("Thread %d creation failed.\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) printf("All threads created successfully.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) /* Print statistics. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) signal(SIGINT, signal_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) signal(SIGTERM, signal_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) signal(SIGABRT, signal_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) clock_gettime(CLOCK_MONOTONIC, &time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) ns0 = time.tv_sec * 1000000000UL + time.tv_nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) for ( ; !quit; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) u64 ns1, ns_diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) sleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) clock_gettime(CLOCK_MONOTONIC, &time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) ns1 = time.tv_sec * 1000000000UL + time.tv_nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) ns_diff = ns1 - ns0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) ns0 = ns1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) print_port_stats_all(ns_diff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) /* Threads completion. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) printf("Quit.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) for (i = 0; i < n_threads; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) thread_data[i].quit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) for (i = 0; i < n_threads; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) pthread_join(threads[i], NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) for (i = 0; i < n_ports; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) port_free(ports[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) bpool_free(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) remove_xdp_program();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }