Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards. The listing below is net/xdp/xdp_umem.c, the AF_XDP user-space packet buffer (umem) registration code.

// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/idr.h>
#include <linux/vmalloc.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

static DEFINE_IDA(umem_ida);

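/* Release the pin on the user pages backing this umem and free the
 * page array that tracked them.
 */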
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);

	kfree(umem->pgs);
	umem->pgs = NULL;
}

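/* Return the pages previously charged against the owning user's
 * RLIMIT_MEMLOCK accounting and drop the uid reference.
 */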
static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

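/* Tear down the contiguous kernel mapping of the umem pages. */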
static void xdp_umem_addr_unmap(struct xdp_umem *umem)
{
	vunmap(umem->addrs);
	umem->addrs = NULL;
}

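/* Map the pinned pages into one contiguous kernel virtual range so the
 * umem can be addressed linearly.
 */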
static int xdp_umem_addr_map(struct xdp_umem *umem, struct page **pages,
			     u32 nr_pages)
{
	umem->addrs = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!umem->addrs)
		return -ENOMEM;
	return 0;
}

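/* Free everything the umem holds: its id, the kernel mapping, the pinned
 * user pages, the memlock accounting and finally the umem itself.
 */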
static void xdp_umem_release(struct xdp_umem *umem)
{
	umem->zc = false;
	ida_simple_remove(&umem_ida, umem->id);

	xdp_umem_addr_unmap(umem);
	xdp_umem_unpin_pages(umem);

	xdp_umem_unaccount_pages(umem);
	kfree(umem);
}

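/* Work item wrapper so that the final release can be deferred to a
 * workqueue instead of running in the caller's context.
 */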
static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

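/* Drop a reference to the umem. On the final put the umem is released,
 * either directly or via a work item when the caller requests deferral.
 */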
void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		if (defer_cleanup) {
			INIT_WORK(&umem->work, xdp_umem_release_deferred);
			schedule_work(&umem->work);
		} else {
			xdp_umem_release(umem);
		}
	}
}

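/* Pin the user memory region long-term (FOLL_LONGTERM) and remember the
 * backing pages. A partial pin is treated as failure and undone.
 */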
static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	npgs = pin_user_pages(address, umem->npgs,
			      gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
	mmap_read_unlock(current->mm);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

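/* Charge the umem's pages against the calling user's RLIMIT_MEMLOCK.
 * Callers with CAP_IPC_LOCK are exempt from the limit.
 */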
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

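/* Validate the registration request from user space and set up the umem:
 * sanity-check chunk size, flags, alignment and headroom, then account,
 * pin and map the described memory area.
 */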
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 npgs_rem, chunk_size = mr->chunk_size, headroom = mr->headroom;
	bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	u64 npgs, addr = mr->addr, size = mr->len;
	unsigned int chunks, chunks_rem;
	int err;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (mr->flags & ~XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		return -EINVAL;

	if (!unaligned_chunks && !is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned. For
		 * simplicity, this requirement might be relaxed in
		 * the future.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	npgs = div_u64_rem(size, PAGE_SIZE, &npgs_rem);
	if (npgs_rem)
		npgs++;
	if (npgs > U32_MAX)
		return -EINVAL;

	chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem);
	if (chunks == 0)
		return -EINVAL;

	if (!unaligned_chunks && chunks_rem)
		return -EINVAL;

	if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
		return -EINVAL;

	umem->size = size;
	umem->headroom = headroom;
	umem->chunk_size = chunk_size;
	umem->chunks = chunks;
	umem->npgs = (u32)npgs;
	umem->pgs = NULL;
	umem->user = NULL;
	umem->flags = mr->flags;

	INIT_LIST_HEAD(&umem->xsk_dma_list);
	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		return err;

	err = xdp_umem_pin_pages(umem, (unsigned long)addr);
	if (err)
		goto out_account;

	err = xdp_umem_addr_map(umem, umem->pgs, umem->npgs);
	if (err)
		goto out_unpin;

	return 0;

out_unpin:
	xdp_umem_unpin_pages(umem);
out_account:
	xdp_umem_unaccount_pages(umem);
	return err;
}

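/* Allocate a new umem, give it an id from the IDA and register the user
 * memory described by @mr. Returns the umem or an ERR_PTR on failure.
 */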
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
	if (err < 0) {
		kfree(umem);
		return ERR_PTR(err);
	}
	umem->id = err;

	err = xdp_umem_reg(umem, mr);
	if (err) {
		ida_simple_remove(&umem_ida, umem->id);
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}