/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#include "xsk.h"

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	/* Hinder the adjacent cache prefetcher from prefetching the consumer
	 * pointer when the producer pointer is touched and vice versa.
	 */
	u32 pad ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 flags;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};
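
/* Rough sizing sketch (illustrative only, not taken from this file): each
 * ring is the fixed struct xdp_ring header followed by nentries
 * flexible-array descriptors. Assuming struct_size() from <linux/overflow.h>,
 * the allocation for a fill/completion ring versus an Rx/Tx ring would be on
 * the order of:
 *
 *	struct xdp_umem_ring *umem_ring;
 *	struct xdp_rxtx_ring *rxtx_ring;
 *
 *	size = umem_queue ? struct_size(umem_ring, desc, nentries) :
 *			    struct_size(rxtx_ring, desc, nentries);
 */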

struct xsk_queue {
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
	u64 queue_empty_descs;
};
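
/* Indexing note: nentries is a power of two and ring_mask == nentries - 1,
 * so the free-running u32 producer/consumer counters are mapped to ring
 * slots with a simple mask, relying on unsigned wraparound:
 *
 *	idx = counter & q->ring_mask;
 *
 * e.g. with nentries == 8 (ring_mask == 7), counter values 6, 7, 8, 9 map to
 * slots 6, 7, 0, 1. Entry counts such as cached_prod - cached_cons likewise
 * stay correct across u32 overflow.
 */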

/* The structure of the shared state of the rings is the same as the
 * ring buffer in kernel/events/ring_buffer.c. For the Rx and completion
 * rings, the kernel is the producer and user space is the consumer. For
 * the Tx and fill rings, the kernel is the consumer and user space is
 * the producer.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {           LOAD ->producer
 *                    (A)           smp_rmb()       (C)
 *    STORE $data                   LOAD $data
 *    smp_wmb()       (B)           smp_mb()        (D)
 *    STORE ->producer              STORE ->consumer
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it ensures that the data is written before the
 * producer pointer is updated. If this barrier was missing, the consumer
 * could observe the producer pointer being set and thus load the data
 * before the producer has written the new data. The consumer would in
 * this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer has actually been read. If we did not have this
 * barrier, some architectures could load old data as speculative loads
 * are not discarded as the CPU does not know there is a dependency
 * between ->producer and data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. In case ->consumer indicates there is no
 * room in the buffer to store $data, we do not store it, so no explicit
 * barrier is needed.
 *
 * (D) ensures that the load of the data completes before the store of
 * the consumer pointer becomes visible. If we did not have this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */
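
/* For reference, the letters above map onto this file as follows:
 * (A) the xskq_prod_is_full() check in the xskq_prod_reserve*() helpers,
 * (B) the smp_wmb() in __xskq_prod_submit(),
 * (C) the smp_rmb() in __xskq_cons_peek(),
 * (D) the smp_mb() in __xskq_cons_release().
 */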

/* The operations on the rings are the following:
 *
 * producer                       consumer
 *
 * RESERVE entries                PEEK in the ring for entries
 * WRITE data into the ring       READ data from the ring
 * SUBMIT entries                 RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and when it is done reading them release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations.
 */
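
/* Illustrative sketch only (simplified; real callers also batch and handle
 * wakeups): a kernel-side producer filling an Rx ring and a kernel-side
 * consumer draining a Tx ring would roughly do:
 *
 *	producer:
 *		if (!xskq_prod_reserve_desc(rx, addr, len))	RESERVE + WRITE
 *			xskq_prod_submit(rx);			SUBMIT
 *
 *	consumer:
 *		while (xskq_cons_peek_desc(tx, &desc, pool)) {	PEEK + READ
 *			(process desc)
 *			xskq_cons_release(tx);			RELEASE (local only)
 *		}
 *		__xskq_cons_release(tx);	publish the releases to the ring
 */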

/* Functions that read and validate content from consumer rings. */

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (q->cached_cons != q->cached_prod) {
		u32 idx = q->cached_cons & q->ring_mask;

		*addr = ring->desc[idx];
		return true;
	}

	return false;
}

static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
					    struct xdp_desc *desc)
{
	u64 chunk, chunk_end;

	chunk = xp_aligned_extract_addr(pool, desc->addr);
	if (likely(desc->len)) {
		chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
		if (chunk != chunk_end)
			return false;
	}

	if (chunk >= pool->addrs_cnt)
		return false;

	if (desc->options)
		return false;
	return true;
}
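
/* Worked example (assuming xp_aligned_extract_addr() masks the address down
 * to the start of its chunk): with a 2 KiB chunk size, a descriptor with
 * addr = 0x700 and len = 0x200 spans [0x700, 0x8ff], i.e. chunk 0x0 at the
 * start but chunk 0x800 at the end, so it is rejected even though
 * len < chunk_size. A descriptor with addr = 0x800 and len = 0x200 stays
 * within chunk 0x800 and passes this check.
 */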

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
					      struct xdp_desc *desc)
{
	u64 addr, base_addr;

	base_addr = xp_unaligned_extract_addr(desc->addr);
	addr = xp_unaligned_add_offset_to_addr(desc->addr);

	if (desc->len > pool->chunk_size)
		return false;

	if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
	    xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
		xp_aligned_validate_desc(pool, desc);
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xsk_buff_pool *pool)
{
	if (!xp_validate_desc(pool, d)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

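/* Read the next descriptor, skipping over any invalid ones. Invalid
 * descriptors are dropped silently; only the invalid_descs counter records
 * them (it is surfaced to user space via the XDP_STATISTICS getsockopt).
 */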
static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	while (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		if (xskq_cons_is_valid_desc(q, desc, pool))
			return true;

		q->cached_cons++;
	}

	return false;
}

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_mb(); /* D, matches A */
	WRITE_ONCE(q->ring->consumer, q->cached_cons);
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = READ_ONCE(q->ring->producer);
	smp_rmb(); /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= cnt)
		return true;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= cnt;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, pool);
}

static inline void xskq_cons_release(struct xsk_queue *q)
{
	/* To improve performance, only update local state here.
	 * Reflect this to global state when we get new entries
	 * from the ring in xskq_cons_get_entries() and whenever
	 * Rx or Tx processing is completed in the NAPI loop.
	 */
	q->cached_cons++;
}

static inline bool xskq_cons_is_full(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
		q->nentries;
}

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}

/* Functions for producers */

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries)
		return false;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return !free_entries;
}

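/* Undo the latest xskq_prod_reserve*(): give back the last reserved, not yet
 * submitted, slot by rewinding the local producer counter.
 */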
static inline void xskq_prod_cancel(struct xsk_queue *q)
{
	q->cached_prod--;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_wmb(); /* B, matches C */

	WRITE_ONCE(q->ring->producer, idx);
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = q->ring->producer;

	ring->desc[idx++ & q->ring_mask] = addr;

	__xskq_prod_submit(q, idx);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */