/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <rdma/uverbs_ioctl.h>
#include "mmap.h"

/**
 * rvt_mmap_init - initialize the pending-mmap list and its locks
 * @rdi: rvt dev struct
 */
void rvt_mmap_init(struct rvt_dev_info *rdi)
{
	INIT_LIST_HEAD(&rdi->pending_mmaps);
	spin_lock_init(&rdi->pending_lock);
	rdi->mmap_offset = PAGE_SIZE;
	spin_lock_init(&rdi->mmap_offset_lock);
}
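
/*
 * Usage note (a hedged sketch, not taken from this file): rvt_mmap_init()
 * is expected to run once per device, before any verbs objects that might
 * be mmap()ed are created.  Assuming a registration path along the lines
 * of rvt_register_device(), the call would look roughly like:
 *
 *	int rvt_register_device(struct rvt_dev_info *rdi)
 *	{
 *		...
 *		rvt_mmap_init(rdi);
 *		...
 *	}
 */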

/**
 * rvt_release_mmap_info - free mmap info structure
 * @ref: a pointer to the kref within struct rvt_mmap_info
 */
void rvt_release_mmap_info(struct kref *ref)
{
	struct rvt_mmap_info *ip =
		container_of(ref, struct rvt_mmap_info, ref);
	struct rvt_dev_info *rdi = ib_to_rvt(ip->context->device);

	spin_lock_irq(&rdi->pending_lock);
	list_del(&ip->pending_mmaps);
	spin_unlock_irq(&rdi->pending_lock);

	vfree(ip->obj);
	kfree(ip);
}

static void rvt_vma_open(struct vm_area_struct *vma)
{
	struct rvt_mmap_info *ip = vma->vm_private_data;

	kref_get(&ip->ref);
}

static void rvt_vma_close(struct vm_area_struct *vma)
{
	struct rvt_mmap_info *ip = vma->vm_private_data;

	kref_put(&ip->ref, rvt_release_mmap_info);
}

static const struct vm_operations_struct rvt_vm_ops = {
	.open = rvt_vma_open,
	.close = rvt_vma_close,
};
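
/*
 * Reference-counting summary (descriptive, with a hedged example): the
 * creator of the object holds one reference from kref_init(), every
 * mapped VMA takes another via rvt_vma_open(), and rvt_vma_close()
 * drops it on munmap() or process exit.  The final kref_put() frees
 * both the vmalloc()ed buffer and the rvt_mmap_info.  Assuming a
 * CQ-style owner with an "ip" member (an illustrative name), its
 * destroy path would drop its own reference like:
 *
 *	if (cq->ip)
 *		kref_put(&cq->ip->ref, rvt_release_mmap_info);
 */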

/**
 * rvt_mmap - create a new mmap region
 * @context: the IB user context of the process making the mmap() call
 * @vma: the VMA to be initialized
 *
 * Return: zero if the mmap is OK. Otherwise, return an errno.
 */
int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rvt_dev_info *rdi = ib_to_rvt(context->device);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct rvt_mmap_info *ip, *pp;
	int ret = -EINVAL;

	/*
	 * Search the device's list of objects waiting for a mmap call.
	 * Normally, this list is very short since a call to create a
	 * CQ, QP, or SRQ is soon followed by a call to mmap().
	 */
	spin_lock_irq(&rdi->pending_lock);
	list_for_each_entry_safe(ip, pp, &rdi->pending_mmaps,
				 pending_mmaps) {
		/* Only the creator is allowed to mmap the object */
		if (context != ip->context || (__u64)offset != ip->offset)
			continue;
		/* Don't allow a mmap larger than the object. */
		if (size > ip->size)
			break;

		list_del_init(&ip->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);

		ret = remap_vmalloc_range(vma, ip->obj, 0);
		if (ret)
			goto done;
		vma->vm_ops = &rvt_vm_ops;
		vma->vm_private_data = ip;
		rvt_vma_open(vma);
		goto done;
	}
	spin_unlock_irq(&rdi->pending_lock);
done:
	return ret;
}
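
/*
 * Userspace view (a hedged sketch, not taken from this file): the driver
 * returns ip->offset and ip->size to the user in the create response
 * (udata), and the user library then maps the object through the uverbs
 * file descriptor, which lands in rvt_mmap() above.  The names "resp"
 * and "cmd_fd" and the response layout are illustrative assumptions:
 *
 *	void *queue = mmap(NULL, resp.size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, cmd_fd, resp.offset);
 *	if (queue == MAP_FAILED)
 *		return errno;
 */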

/**
 * rvt_create_mmap_info - allocate information for rvt_mmap()
 * @rdi: rvt dev struct
 * @size: size in bytes to map
 * @udata: user data (must be valid!)
 * @obj: opaque pointer to a cq, wq etc
 *
 * Return: rvt_mmap_info struct on success, ERR_PTR on failure
 */
struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
					   struct ib_udata *udata, void *obj)
{
	struct rvt_mmap_info *ip;

	if (!udata)
		return ERR_PTR(-EINVAL);

	ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
	if (!ip)
		return ERR_PTR(-ENOMEM);

	size = PAGE_ALIGN(size);

	spin_lock_irq(&rdi->mmap_offset_lock);
	if (rdi->mmap_offset == 0)
		rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
	ip->offset = rdi->mmap_offset;
	rdi->mmap_offset += ALIGN(size, SHMLBA);
	spin_unlock_irq(&rdi->mmap_offset_lock);

	INIT_LIST_HEAD(&ip->pending_mmaps);
	ip->size = size;
	ip->context =
		container_of(udata, struct uverbs_attr_bundle, driver_udata)
			->context;
	ip->obj = obj;
	kref_init(&ip->ref);

	return ip;
}
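
/*
 * Caller-side sketch (hedged; modeled loosely on a CQ create path, with
 * "cq", "wc", and "sz" as illustrative names): the owner vmalloc()s the
 * shared buffer, wraps it in an rvt_mmap_info, copies the offset back to
 * userspace via udata, and only then queues the entry so rvt_mmap() can
 * find it:
 *
 *	wc = vmalloc_user(sz);
 *	if (!wc)
 *		return -ENOMEM;
 *	cq->ip = rvt_create_mmap_info(rdi, sz, udata, wc);
 *	if (IS_ERR(cq->ip)) {
 *		vfree(wc);
 *		return PTR_ERR(cq->ip);
 *	}
 *	err = ib_copy_to_udata(udata, &cq->ip->offset,
 *			       sizeof(cq->ip->offset));
 *	if (err)
 *		goto bail_ip;
 *	spin_lock_irq(&rdi->pending_lock);
 *	list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
 *	spin_unlock_irq(&rdi->pending_lock);
 */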

/**
 * rvt_update_mmap_info - update a mem map
 * @rdi: rvt dev struct
 * @ip: mmap info pointer
 * @size: new size in bytes to map
 * @obj: opaque pointer to cq, wq, etc.
 */
void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip,
			  u32 size, void *obj)
{
	size = PAGE_ALIGN(size);

	spin_lock_irq(&rdi->mmap_offset_lock);
	if (rdi->mmap_offset == 0)
		rdi->mmap_offset = PAGE_SIZE;
	ip->offset = rdi->mmap_offset;
	rdi->mmap_offset += size;
	spin_unlock_irq(&rdi->mmap_offset_lock);

	ip->size = size;
	ip->obj = obj;
}
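
/*
 * Caller-side sketch for the resize case (hedged; "cq", "wc", and "sz"
 * are illustrative names): when an object such as a CQ is resized, the
 * owner allocates a new buffer, updates the existing rvt_mmap_info with
 * the new size and buffer, reports the new offset through udata, and
 * re-queues the entry so the next mmap() call finds it:
 *
 *	rvt_update_mmap_info(rdi, cq->ip, sz, wc);
 *	err = ib_copy_to_udata(udata, &cq->ip->offset,
 *			       sizeof(cq->ip->offset));
 *	if (err)
 *		return err;
 *	spin_lock_irq(&rdi->pending_lock);
 *	list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
 *	spin_unlock_irq(&rdi->pending_lock);
 */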