/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 */

#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

struct ib_umem_odp {
	struct ib_umem umem;
	struct mmu_interval_notifier notifier;
	struct pid *tgid;

	/* An array of the pfns included in the on-demand paging umem. */
	unsigned long *pfn_list;

	/*
	 * An array of the DMA addresses mapped for the pfns in pfn_list.
	 * The lower two bits designate access permissions.
	 * See ODP_READ_ALLOWED_BIT and ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t *dma_list;
	/*
	 * The umem_mutex protects the pfn_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex umem_mutex;
	void *private; /* for the HW driver to use. */

	int npages;

	/*
	 * An implicit ODP umem cannot be DMA mapped, has zero length, and
	 * serves only as an anchor for the driver to hold onto the per_mm.
	 * FIXME: this should be removed and drivers should work with the
	 * per_mm directly.
	 */
	bool is_implicit_odp;

	unsigned int page_shift;
};
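
/*
 * Illustrative sketch, not part of this header's API: any walk of
 * pfn_list/dma_list must be done under umem_mutex, since an MMU notifier
 * invalidation may otherwise unmap entries concurrently. "odp" and
 * use_entry() below are hypothetical:
 *
 *	mutex_lock(&odp->umem_mutex);
 *	for (i = 0; i < ib_umem_odp_num_pages(odp); i++)
 *		use_entry(odp->pfn_list[i], odp->dma_list[i]);
 *	mutex_unlock(&odp->umem_mutex);
 */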

static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}
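
/*
 * Example (sketch): a driver holding the ib_umem of an ODP MR can recover
 * the ODP metadata with the downcast above. This is only valid for umems
 * created with IB_ACCESS_ON_DEMAND; "mr" is a hypothetical driver object:
 *
 *	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
 */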

/* Returns the address of the first page of an ODP umem. */
static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
{
	return umem_odp->notifier.interval_tree.start;
}

/* Returns the address of the page after the last one of an ODP umem. */
static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
{
	return umem_odp->notifier.interval_tree.last + 1;
}

static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
{
	return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >>
	       umem_odp->page_shift;
}
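
/*
 * Worked example: with page_shift == 12 (4 KiB pages), an interval tree
 * entry with start == 0x10000 and last == 0x13fff gives
 * ib_umem_end() == 0x14000 and
 * ib_umem_odp_num_pages() == (0x14000 - 0x10000) >> 12 == 4.
 */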

/*
 * The lower two bits of each DMA address encode the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to ib_umem_odp_map_dma_and_lock().
 *
 * Be aware that upgrading a mapped address may change the DMA address
 * for the page.
 */
#define ODP_READ_ALLOWED_BIT  (1ULL << 0)
#define ODP_WRITE_ALLOWED_BIT (1ULL << 1)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
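
/*
 * Example (sketch): decoding one dma_list entry. The flag bits can live in
 * the low bits because the DMA addresses stored here are page aligned;
 * "idx" is a hypothetical index:
 *
 *	dma_addr_t entry = umem_odp->dma_list[idx];
 *	dma_addr_t dma   = entry & ODP_DMA_ADDR_MASK;
 *	bool writable    = entry & ODP_WRITE_ALLOWED_BIT;
 */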

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

struct ib_umem_odp *
ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
		int access, const struct mmu_interval_notifier_ops *ops);
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
					       int access);
struct ib_umem_odp *
ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr,
			size_t size,
			const struct mmu_interval_notifier_ops *ops);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);

int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bcnt, u64 access_mask, bool fault);

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bound);
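
/*
 * Sketch of a typical page-fault flow (error handling elided; the HW
 * programming step is hypothetical). This assumes, as the name suggests,
 * that ib_umem_odp_map_dma_and_lock() returns on success with umem_mutex
 * held, so the caller can consume dma_list before an invalidation can race
 * with it and must unlock when done:
 *
 *	npages = ib_umem_odp_map_dma_and_lock(odp, fault_start, fault_len,
 *					      ODP_READ_ALLOWED_BIT |
 *					      ODP_WRITE_ALLOWED_BIT,
 *					      true);
 *	if (npages < 0)
 *		return npages;
 *	... program the HW translation table from odp->dma_list ...
 *	mutex_unlock(&odp->umem_mutex);
 */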

#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline struct ib_umem_odp *
ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
		int access, const struct mmu_interval_notifier_ops *ops)
{
	return ERR_PTR(-EINVAL);
}

static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

#endif /* IB_UMEM_ODP_H */