/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems. All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct ib_umem {
	struct ib_device *ibdev;	/* device this umem belongs to */
	struct mm_struct *owning_mm;	/* mm the memory is pinned against */
	u64 iova;			/* device virtual address of the region */
	size_t length;			/* length of the region in bytes */
	unsigned long address;		/* userspace virtual start address */
	u32 writable : 1;		/* region is writable by the device */
	u32 is_odp : 1;			/* backed by on-demand paging */
	struct work_struct work;	/* deferred release work */
	struct sg_table sg_head;	/* scatter/gather table of pinned pages */
	int nmap;			/* number of DMA-mapped SG entries */
	unsigned int sg_nents;		/* number of SG entries */
};

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}
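/*
 * Illustrative only: with a 4 KiB PAGE_SIZE, a umem starting at userspace
 * address 0x1234 gives ib_umem_offset() == 0x234, i.e. the region begins
 * 0x234 bytes into its first page.
 */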

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}
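/*
 * Worked example (illustrative values): for iova = 0x1234, length = 0x6000
 * and pgsz = 0x1000, the span runs from ALIGN_DOWN(0x1234, 0x1000) = 0x1000
 * to ALIGN(0x7234, 0x1000) = 0x8000, giving (0x8000 - 0x1000) / 0x1000 = 7
 * DMA blocks -- one more than length / pgsz, because the unaligned region
 * straddles an extra page boundary.
 */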

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the current position
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);          \
	     __rdma_block_iter_next(biter);)
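/*
 * Sketch of typical driver usage: walk the umem in pgsz-sized blocks and
 * record each block's DMA address in a device page table. "pas" and "i"
 * are illustrative names, not part of this API;
 * rdma_block_iter_dma_address() comes from <rdma/ib_verbs.h>.
 *
 *	struct ib_block_iter biter;
 *	int i = 0;
 *
 *	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
 *		pas[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));
 */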

#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);
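/*
 * Sketch of the usual registration flow, assuming a driver-supplied
 * pgsz_bitmap of page sizes the device supports (the helpers above are
 * the real API; the surrounding variables are illustrative):
 *
 *	struct ib_umem *umem;
 *	unsigned long pgsz;
 *
 *	umem = ib_umem_get(device, start, length, access_flags);
 *	if (IS_ERR(umem))
 *		return PTR_ERR(umem);
 *
 *	pgsz = ib_umem_find_best_pgsz(umem, pgsz_bitmap, iova);
 *	if (!pgsz) {
 *		ib_umem_release(umem);
 *		return -EINVAL;
 *	}
 *
 * The device is then programmed with ib_umem_num_dma_blocks(umem, pgsz)
 * blocks, and ib_umem_release() is called on deregistration.
 */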

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EINVAL);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
				    size_t length)
{
	return -EINVAL;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}

#endif /* CONFIG_INFINIBAND_USER_MEM */

#endif /* IB_UMEM_H */