// SPDX-License-Identifier: GPL-2.0

#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

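/* Link a Tx-capable socket into the pool's RCU-protected Tx list. */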
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
	list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

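/* Remove a Tx-capable socket from the pool's Tx list. */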
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
	list_del_rcu(&xs->tx_list);
	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

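/* Free the buffer heads and the pool itself. Safe to call with NULL. */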
void xp_destroy(struct xsk_buff_pool *pool)
{
	if (!pool)
		return;

	kvfree(pool->heads);
	kvfree(pool);
}

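/* Allocate a buffer pool sized for the umem's chunks, adopt the socket's
 * temporary fill and completion rings, and initialize one xdp_buff_xsk
 * head per chunk.
 */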
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem)
{
	struct xsk_buff_pool *pool;
	struct xdp_buff_xsk *xskb;
	u32 i;

	pool = kvzalloc(struct_size(pool, free_heads, umem->chunks),
			GFP_KERNEL);
	if (!pool)
		goto out;

	pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL);
	if (!pool->heads)
		goto out;

	pool->chunk_mask = ~((u64)umem->chunk_size - 1);
	pool->addrs_cnt = umem->size;
	pool->heads_cnt = umem->chunks;
	pool->free_heads_cnt = umem->chunks;
	pool->headroom = umem->headroom;
	pool->chunk_size = umem->chunk_size;
	pool->unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	pool->frame_len = umem->chunk_size - umem->headroom -
			  XDP_PACKET_HEADROOM;
	pool->umem = umem;
	pool->addrs = umem->addrs;
	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->xsk_tx_list);
	spin_lock_init(&pool->xsk_tx_list_lock);
	spin_lock_init(&pool->cq_lock);
	refcount_set(&pool->users, 1);

	pool->fq = xs->fq_tmp;
	pool->cq = xs->cq_tmp;

	for (i = 0; i < pool->free_heads_cnt; i++) {
		xskb = &pool->heads[i];
		xskb->pool = pool;
		xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
		pool->free_heads[i] = xskb;
	}

	return pool;

out:
	xp_destroy(pool);
	return NULL;
}

void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++)
		pool->heads[i].xdp.rxq = rxq;
}
EXPORT_SYMBOL(xp_set_rxq_info);

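/* Ask the driver to tear down its zero-copy state for this queue, but
 * only if zero-copy was actually enabled on the umem.
 */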
static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
{
	struct netdev_bpf bpf;
	int err;

	ASSERT_RTNL();

	if (pool->umem->zc) {
		bpf.command = XDP_SETUP_XSK_POOL;
		bpf.xsk.pool = NULL;
		bpf.xsk.queue_id = pool->queue_id;

		err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);

		if (err)
			WARN(1, "Failed to disable zero-copy!\n");
	}
}

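/* Bind the pool to a netdev queue. Copy mode stops after registering the
 * pool; zero-copy mode additionally asks the driver to set up the pool
 * via ndo_bpf and falls back to copy mode unless XDP_ZEROCOPY was forced.
 */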
static int __xp_assign_dev(struct xsk_buff_pool *pool,
			   struct net_device *netdev, u16 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err = 0;

	ASSERT_RTNL();

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (xsk_get_pool_from_qid(netdev, queue_id))
		return -EBUSY;

	pool->netdev = netdev;
	pool->queue_id = queue_id;
	err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
	if (err)
		return err;

	if (flags & XDP_USE_NEED_WAKEUP) {
		pool->uses_need_wakeup = true;
		/* Tx needs to be explicitly woken up the first time.
		 * This also covers drivers that do not implement the
		 * feature; they will always have to call sendto().
		 */
		pool->cached_need_wakeup = XDP_WAKEUP_TX;
	}

	dev_hold(netdev);

	if (force_copy)
		/* For copy-mode, we are done. */
		return 0;

	if (!netdev->netdev_ops->ndo_bpf ||
	    !netdev->netdev_ops->ndo_xsk_wakeup) {
		err = -EOPNOTSUPP;
		goto err_unreg_pool;
	}

	bpf.command = XDP_SETUP_XSK_POOL;
	bpf.xsk.pool = pool;
	bpf.xsk.queue_id = queue_id;

	err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
	if (err)
		goto err_unreg_pool;

	if (!pool->dma_pages) {
		WARN(1, "Driver did not DMA map zero-copy buffers");
		err = -EINVAL;
		goto err_unreg_xsk;
	}
	pool->umem->zc = true;
	return 0;

err_unreg_xsk:
	xp_disable_drv_zc(pool);
err_unreg_pool:
	if (!force_zc)
		err = 0; /* fallback to copy mode */
	if (err) {
		xsk_clear_pool_at_qid(netdev, queue_id);
		dev_put(netdev);
	}
	return err;
}

int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags)
{
	return __xp_assign_dev(pool, dev, queue_id, flags);
}

int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
			 struct net_device *dev, u16 queue_id)
{
	u16 flags;

	/* One fill and completion ring required for each queue id. */
	if (!pool->fq || !pool->cq)
		return -EINVAL;

	flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
	if (pool->uses_need_wakeup)
		flags |= XDP_USE_NEED_WAKEUP;

	return __xp_assign_dev(pool, dev, queue_id, flags);
}

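/* Undo __xp_assign_dev(): disable driver zero-copy, unregister the pool
 * from the queue id and drop the netdev reference.
 */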
void xp_clear_dev(struct xsk_buff_pool *pool)
{
	if (!pool->netdev)
		return;

	xp_disable_drv_zc(pool);
	xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
	dev_put(pool->netdev);
	pool->netdev = NULL;
}

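/* Deferred teardown, run from a workqueue so that the RTNL lock can be
 * taken: detach from the device, destroy the fill and completion rings,
 * drop the umem reference and free the pool.
 */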
static void xp_release_deferred(struct work_struct *work)
{
	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
						  work);

	rtnl_lock();
	xp_clear_dev(pool);
	rtnl_unlock();

	if (pool->fq) {
		xskq_destroy(pool->fq);
		pool->fq = NULL;
	}

	if (pool->cq) {
		xskq_destroy(pool->cq);
		pool->cq = NULL;
	}

	xdp_put_umem(pool->umem, false);
	xp_destroy(pool);
}

void xp_get_pool(struct xsk_buff_pool *pool)
{
	refcount_inc(&pool->users);
}

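/* Drop a pool reference. The final put schedules the deferred teardown
 * and returns true; otherwise returns false.
 */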
bool xp_put_pool(struct xsk_buff_pool *pool)
{
	if (!pool)
		return false;

	if (refcount_dec_and_test(&pool->users)) {
		INIT_WORK(&pool->work, xp_release_deferred);
		schedule_work(&pool->work);
		return true;
	}

	return false;
}

static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
{
	struct xsk_dma_map *dma_map;

	list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
		if (dma_map->netdev == pool->netdev)
			return dma_map;
	}

	return NULL;
}

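/* Allocate a DMA mapping descriptor for nr_pages umem pages and link it
 * into the umem's list of per-netdev mappings.
 */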
static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
					     u32 nr_pages, struct xdp_umem *umem)
{
	struct xsk_dma_map *dma_map;

	dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL);
	if (!dma_map)
		return NULL;

	dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
	if (!dma_map->dma_pages) {
		kfree(dma_map);
		return NULL;
	}

	dma_map->netdev = netdev;
	dma_map->dev = dev;
	dma_map->dma_need_sync = false;
	dma_map->dma_pages_cnt = nr_pages;
	refcount_set(&dma_map->users, 1);
	list_add(&dma_map->list, &umem->xsk_dma_list);
	return dma_map;
}

static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
{
	list_del(&dma_map->list);
	kvfree(dma_map->dma_pages);
	kfree(dma_map);
}

static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
{
	dma_addr_t *dma;
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = &dma_map->dma_pages[i];
		if (*dma) {
			dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
					     DMA_BIDIRECTIONAL, attrs);
			*dma = 0;
		}
	}

	xp_destroy_dma_map(dma_map);
}

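/* Release the pool's view of the DMA mapping. The pages themselves are
 * only unmapped once the last pool sharing the mapping drops it.
 */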
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
	struct xsk_dma_map *dma_map;

	if (pool->dma_pages_cnt == 0)
		return;

	dma_map = xp_find_dma_map(pool);
	if (!dma_map) {
		WARN(1, "Could not find dma_map for device");
		return;
	}

	if (!refcount_dec_and_test(&dma_map->users))
		return;

	__xp_dma_unmap(dma_map, attrs);
	kvfree(pool->dma_pages);
	pool->dma_pages_cnt = 0;
	pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);

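/* Mark pages whose DMA address is followed contiguously by the next
 * page, so unaligned chunks can safely span the page boundary.
 */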
static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
		if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
			dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
		else
			dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
	}
}

static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
	pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
	if (!pool->dma_pages)
		return -ENOMEM;

	pool->dev = dma_map->dev;
	pool->dma_pages_cnt = dma_map->dma_pages_cnt;
	pool->dma_need_sync = dma_map->dma_need_sync;
	memcpy(pool->dma_pages, dma_map->dma_pages,
	       pool->dma_pages_cnt * sizeof(*pool->dma_pages));

	return 0;
}

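/* DMA map all umem pages for a device. If another pool sharing this umem
 * has already mapped them for the same netdev, reuse that mapping and
 * just bump its refcount.
 */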
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages)
{
	struct xsk_dma_map *dma_map;
	dma_addr_t dma;
	int err;
	u32 i;

	dma_map = xp_find_dma_map(pool);
	if (dma_map) {
		err = xp_init_dma_info(pool, dma_map);
		if (err)
			return err;

		refcount_inc(&dma_map->users);
		return 0;
	}

	dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
	if (!dma_map)
		return -ENOMEM;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, attrs);
		if (dma_mapping_error(dev, dma)) {
			__xp_dma_unmap(dma_map, attrs);
			return -ENOMEM;
		}
		if (dma_need_sync(dev, dma))
			dma_map->dma_need_sync = true;
		dma_map->dma_pages[i] = dma;
	}

	if (pool->unaligned)
		xp_check_dma_contiguity(dma_map);

	err = xp_init_dma_info(pool, dma_map);
	if (err) {
		__xp_dma_unmap(dma_map, attrs);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(xp_dma_map);

static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
					  u64 addr)
{
	return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}

static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_unaligned_extract_addr(*addr);
	if (*addr >= pool->addrs_cnt ||
	    *addr + pool->chunk_size > pool->addrs_cnt ||
	    xp_addr_crosses_non_contig_pg(pool, *addr))
		return false;
	return true;
}

static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_aligned_extract_addr(pool, *addr);
	return *addr < pool->addrs_cnt;
}

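/* Pop a free buffer head and pair it with the next valid address from
 * the fill ring, skipping and counting invalid descriptors.
 */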
static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;
	u64 addr;
	bool ok;

	if (pool->free_heads_cnt == 0)
		return NULL;

	xskb = pool->free_heads[--pool->free_heads_cnt];

	for (;;) {
		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
			pool->fq->queue_empty_descs++;
			xp_release(xskb);
			return NULL;
		}

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (!ok) {
			pool->fq->invalid_descs++;
			xskq_cons_release(pool->fq);
			continue;
		}
		break;
	}
	xskq_cons_release(pool->fq);

	xskb->orig_addr = addr;
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
	if (pool->dma_pages_cnt) {
		xskb->frame_dma = (pool->dma_pages[addr >> PAGE_SHIFT] &
				   ~XSK_NEXT_PG_CONTIG_MASK) +
				  (addr & ~PAGE_MASK);
		xskb->dma = xskb->frame_dma + pool->headroom +
			    XDP_PACKET_HEADROOM;
	}
	return xskb;
}

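/* Allocate one buffer for the driver Rx path: reuse a recycled buffer
 * from the free list if possible, otherwise take one from the fill ring,
 * then prepare the xdp_buff and sync it for the device if needed.
 */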
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;

	if (!pool->free_list_cnt) {
		xskb = __xp_alloc(pool);
		if (!xskb)
			return NULL;
	} else {
		pool->free_list_cnt--;
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
					free_list_node);
		list_del(&xskb->free_list_node);
	}

	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
	xskb->xdp.data_meta = xskb->xdp.data;

	if (pool->dma_need_sync) {
		dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
						 pool->frame_len,
						 DMA_BIDIRECTIONAL);
	}
	return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);

bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	if (pool->free_list_cnt >= count)
		return true;
	return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
}
EXPORT_SYMBOL(xp_can_alloc);

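/* Return a buffer to the pool's free list for later reuse by xp_alloc(). */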
void xp_free(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_list_cnt++;
	list_add(&xskb->free_list_node, &xskb->pool->free_list);
}
EXPORT_SYMBOL(xp_free);

void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return pool->addrs + addr;
}
EXPORT_SYMBOL(xp_raw_get_data);

dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return (pool->dma_pages[addr >> PAGE_SHIFT] &
		~XSK_NEXT_PG_CONTIG_MASK) +
		(addr & ~PAGE_MASK);
}
EXPORT_SYMBOL(xp_raw_get_dma);

void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
{
	dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
				      xskb->pool->frame_len, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size)
{
	dma_sync_single_range_for_device(pool->dev, dma, 0,
					 size, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_device_slow);