/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/bpf.h>	/* struct bpf_map, embedded in struct xsk_map below */
#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;
struct xdp_buff;

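/* A UMEM is the user-space memory area registered with the XDP_UMEM_REG
 * setsockopt. It is split into equally sized chunks that the descriptor
 * rings refer to by address, and it may be shared between several
 * sockets (XDP_SHARED_UMEM).
 */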
struct xdp_umem {
	void *addrs;
	u64 size;
	u32 headroom;
	u32 chunk_size;
	u32 chunks;
	u32 npgs;
	struct user_struct *user;
	refcount_t users;
	u8 flags;
	bool zc;
	struct page **pgs;
	int id;
	struct list_head xsk_dma_list;
	struct work_struct work;
};

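/* Kernel-side representation of a BPF_MAP_TYPE_XSKMAP map: an array of
 * AF_XDP sockets that an XDP program redirects packets into with
 * bpf_redirect_map().
 */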
struct xsk_map {
	struct bpf_map map;
	spinlock_t lock; /* Synchronize map updates */
	struct xdp_sock *xsk_map[];
};

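/* An AF_XDP socket. The rx/tx rings carry descriptors into the umem,
 * while fq_tmp/cq_tmp only hold the fill and completion rings between
 * the setsockopts that create them and bind(), after which the buffer
 * pool takes them over.
 */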
struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx ____cacheline_aligned_in_smp;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	struct xsk_buff_pool *pool;
	u16 queue_id;
	bool zc;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;

	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head tx_list;
	/* Protects generic receive. */
	spinlock_t rx_lock;

	/* Statistics */
	u64 rx_dropped;
	u64 rx_queue_full;

	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
	struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};

#ifdef CONFIG_XDP_SOCKETS

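/* Receive path entry points: xsk_generic_rcv() copies a frame into the
 * socket in generic (SKB/copy) mode, __xsk_map_redirect() queues a frame
 * on the socket picked out of an XSKMAP by bpf_redirect_map(), and
 * __xsk_map_flush() publishes the queued descriptors and wakes the
 * sockets batched on the per-cpu flush list at the end of a NAPI cycle.
 */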
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);

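/* Socket lookup used on the XDP_REDIRECT fast path. It runs inside the
 * NAPI RCU read section; READ_ONCE() pairs with the WRITE_ONCE() done by
 * the map update path, so a concurrently updated slot yields either the
 * old or the new socket, never a torn pointer.
 */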
static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs;

	if (key >= map->max_entries)
		return NULL;

	xs = READ_ONCE(m->xsk_map[key]);
	return xs;
}

#else

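/* Stubs used when AF_XDP sockets are compiled out. */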
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(void)
{
}

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	return NULL;
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */