// SPDX-License-Identifier: GPL-2.0
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/overflow.h>
#include <net/xdp_sock_drv.h>

#include "xsk_queue.h"
/* Fill and completion rings carry plain u64 addresses, while Rx/Tx rings
 * carry struct xdp_desc entries; struct_size() accounts for the ring
 * header plus the flexible descriptor array of either type.
 */
static size_t xskq_get_ring_size(struct xsk_queue *q, bool umem_queue)
{
	struct xdp_umem_ring *umem_ring;
	struct xdp_rxtx_ring *rxtx_ring;

	if (umem_queue)
		return struct_size(umem_ring, desc, q->nentries);
	return struct_size(rxtx_ring, desc, q->nentries);
}

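/* Illustrative sketch of what the struct_size() calls above compute,
 * assuming the ring layouts declared in xsk_queue.h; the real macro
 * additionally saturates at SIZE_MAX on overflow, so an oversized
 * nentries fails the allocation instead of under-allocating:
 *
 *	size = offsetof(struct xdp_umem_ring, desc) +
 *	       (size_t)q->nentries * sizeof(u64);		fill/completion
 *	size = offsetof(struct xdp_rxtx_ring, desc) +
 *	       (size_t)q->nentries * sizeof(struct xdp_desc);	Rx/Tx
 */
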
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
{
	struct xsk_queue *q;
	gfp_t gfp_flags;
	size_t size;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;

	q->nentries = nentries;
	/* Callers validate that nentries is a power of two, so the mask
	 * implements cheap index wrap-around.
	 */
	q->ring_mask = nentries - 1;

	/* __GFP_ZERO: producer/consumer indices start out at zero.
	 * __GFP_NOWARN + __GFP_NORETRY: a failed high-order allocation is
	 * reported to the caller rather than splatting or thrashing reclaim.
	 * __GFP_COMP: allocate a compound page so page_frag_free() can
	 * recover the allocation order on teardown.
	 */
	gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
		    __GFP_COMP | __GFP_NORETRY;
	size = xskq_get_ring_size(q, umem_queue);

	q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
						      get_order(size));
	if (!q->ring) {
		kfree(q);
		return NULL;
	}

	return q;
}

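/* Illustrative usage (not part of this file): the socket layer is
 * expected to validate nentries before calling in; xsk_init_queue() in
 * net/xdp/xsk.c does roughly:
 *
 *	if (!entries || !is_power_of_2(entries))
 *		return -EINVAL;
 *	q = xskq_create(entries, umem_queue);
 *	if (!q)
 *		return -ENOMEM;
 */
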
void xskq_destroy(struct xsk_queue *q)
{
	if (!q)
		return;

	/* Frees the compound page allocated in xskq_create(). */
	page_frag_free(q->ring);
	kfree(q);
}
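
/* Usage note (illustrative): xskq_destroy() is NULL-safe, so teardown
 * paths can call it unconditionally on rings that may never have been
 * created, e.g.:
 *
 *	xskq_destroy(xs->rx);
 *	xskq_destroy(xs->tx);
 */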