/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. */

#ifndef XSK_H_
#define XSK_H_

/* Masks for xdp_umem_page flags.
 * The low 12 bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)

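/* Illustrative sketch only: how the contiguity flag can be folded into a
 * page-aligned (4K) address and masked back out. The dma_pages array and
 * the condition below are assumptions about the pool code, not part of
 * this header:
 *
 *	dma_addr_t addr = pool->dma_pages[i];	// low 12 bits are clear
 *
 *	if (next_page_is_contiguous)		// hypothetical condition
 *		addr |= XSK_NEXT_PG_CONTIG_MASK;
 *	pool->dma_pages[i] = addr;
 *
 *	// Consumers strip the flag before using the address:
 *	dma_addr_t page = addr & ~XSK_NEXT_PG_CONTIG_MASK;
 */
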
struct xdp_ring_offset_v1 {
	__u64 producer;
	__u64 consumer;
	__u64 desc;
};

struct xdp_mmap_offsets_v1 {
	struct xdp_ring_offset_v1 rx;
	struct xdp_ring_offset_v1 tx;
	struct xdp_ring_offset_v1 fr;
	struct xdp_ring_offset_v1 cr;
};

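/* Sketch of how the v1 layout relates to the current uapi structures:
 * struct xdp_ring_offset_v1 predates the "flags" member of struct
 * xdp_ring_offset, so a compat copy for old XDP_MMAP_OFFSETS callers
 * simply drops that field. The helper name here is hypothetical:
 *
 *	static void xsk_ring_offset_to_v1(struct xdp_ring_offset_v1 *v1,
 *					  const struct xdp_ring_offset *off)
 *	{
 *		v1->producer = off->producer;
 *		v1->consumer = off->consumer;
 *		v1->desc = off->desc;
 *	}
 */
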
/* Nodes are linked in the struct xdp_sock map_list field, and used to
 * track which maps a certain socket resides in.
 */
struct xsk_map_node {
	struct list_head node;
	struct xsk_map *map;
	struct xdp_sock **map_entry;
};

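/* Simplified sketch of why the node exists: on socket teardown, every
 * map entry still pointing at the socket must be cleared, so the socket
 * can walk its map_list under xs->map_list_lock. The real delete path
 * is more careful about lock ordering; this only shows how the fields
 * relate:
 *
 *	struct xsk_map_node *node;
 *
 *	spin_lock_bh(&xs->map_list_lock);
 *	list_for_each_entry(node, &xs->map_list, node)
 *		xsk_map_try_sock_delete(node->map, xs, node->map_entry);
 *	spin_unlock_bh(&xs->map_list_lock);
 */
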
/* Valid only because struct sock is the first member of struct xdp_sock. */
static inline struct xdp_sock *xdp_sk(struct sock *sk)
{
	return (struct xdp_sock *)sk;
}

void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);
void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id);
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id);

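/* Sketch of the expected pairing for the pool/queue helpers: a bind
 * ties a buffer pool to one device queue, and the registration must be
 * undone on unbind or error. Assumed usage, mirroring the bind path
 * rather than quoting it:
 *
 *	err = xsk_reg_pool_at_qid(dev, pool, queue_id);
 *	if (err)
 *		return err;	// e.g. queue_id already in use
 *	...
 *	xsk_clear_pool_at_qid(dev, queue_id);
 */
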
#endif /* XSK_H_ */