Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2)  * Copyright(c) 2016 Intel Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * This file is provided under a dual BSD/GPLv2 license.  When using or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * redistributing this file, you may do so under either license.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * GPL LICENSE SUMMARY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * This program is free software; you can redistribute it and/or modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * it under the terms of version 2 of the GNU General Public License as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * published by the Free Software Foundation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * This program is distributed in the hope that it will be useful, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  * WITHOUT ANY WARRANTY; without even the implied warranty of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  * General Public License for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  * BSD LICENSE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20)  * Redistribution and use in source and binary forms, with or without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21)  * modification, are permitted provided that the following conditions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  * are met:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  *  - Redistributions of source code must retain the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25)  *    notice, this list of conditions and the following disclaimer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26)  *  - Redistributions in binary form must reproduce the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  *    notice, this list of conditions and the following disclaimer in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  *    the documentation and/or other materials provided with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  *    distribution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)  *  - Neither the name of Intel Corporation nor the names of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  *    contributors may be used to endorse or promote products derived
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32)  *    from this software without specific prior written permission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34)  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35)  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36)  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37)  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38)  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39)  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40)  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41)  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42)  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43)  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44)  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) #include <rdma/uverbs_ioctl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) #include "srq.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) #include "vt.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) #include "qp.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57)  * rvt_driver_srq_init - init srq resources on a per driver basis
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58)  * @rdi: rvt dev structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60)  * Do any initialization needed when a driver registers with rdmavt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) void rvt_driver_srq_init(struct rvt_dev_info *rdi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 	spin_lock_init(&rdi->n_srqs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	rdi->n_srqs_allocated = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 
/**
 * rvt_create_srq - create a shared receive queue
 * @ibsrq: the IB SRQ object being created (embedded in a struct rvt_srq)
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 *
 * Allocates the receive work queue ring, and for user SRQs publishes it
 * via the mmap mechanism (see rvt_mmap()).
 *
 * Return: 0 on success, -EOPNOTSUPP for non-basic SRQ types, -EINVAL
 * for out-of-range attributes, -ENOMEM on allocation/limit failure, or
 * the error from copying the mmap offset to user space.
 */
int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
		   struct ib_udata *udata)
{
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	u32 sz;
	int ret;

	/* Only basic SRQs are supported here (no XRC/TM variants). */
	if (srq_init_attr->srq_type != IB_SRQT_BASIC)
		return -EOPNOTSUPP;

	/* Reject zero or over-limit SGE/WR requests up front. */
	if (srq_init_attr->attr.max_sge == 0 ||
	    srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
	    srq_init_attr->attr.max_wr == 0 ||
	    srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
		return -EINVAL;

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	/* One extra slot so a full ring can be told apart from an empty one. */
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	/* Per-WQE size: fixed header plus max_sge scatter/gather entries. */
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct rvt_rwqe);
	if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz,
			 dev->dparms.node, udata)) {
		ret = -ENOMEM;
		goto bail_srq;
	}

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;

		srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
		if (IS_ERR(srq->ip)) {
			ret = PTR_ERR(srq->ip);
			goto bail_wq;
		}

		ret = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (ret)
			goto bail_ip;
	}

	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
	srq->limit = srq_init_attr->attr.srq_limit;

	/* Enforce the device-wide cap on the number of SRQs. */
	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
		spin_unlock(&dev->n_srqs_lock);
		ret = -ENOMEM;
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

	/* Make a user SRQ's mapping visible to rvt_mmap(). */
	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	return 0;

bail_ip:
	kfree(srq->ip);
bail_wq:
	rvt_free_rq(&srq->rq);
bail_srq:
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 
/**
 * rvt_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 *
 * IB_SRQ_MAX_WR resizes the ring: a new ring is allocated, the
 * posted-but-unconsumed WQEs are copied across under the queue lock,
 * the old ring is freed, and (for user SRQs) the new mmap offset is
 * returned via udata.  IB_SRQ_LIMIT alone just range-checks and stores
 * the new limit under the lock.
 *
 * Return: 0 on success
 */
int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
	struct rvt_rq tmp_rq = {};
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct rvt_krwq *okwq = NULL;
		struct rvt_rwq *owq = NULL;
		struct rvt_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > dev->dparms.props.max_srq_wr) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr)
			return -EINVAL;
		/* Per-WQE size must match the layout used at create time. */
		sz = sizeof(struct rvt_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		/* +1 slot distinguishes a full ring from an empty one. */
		size = attr->max_wr + 1;
		/* Allocate the replacement ring before taking the lock. */
		if (rvt_alloc_rq(&tmp_rq, size * sz, dev->dparms.node,
				 udata))
			return -ENOMEM;
		/* Check that we can write the offset to mmap. */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = 0;

			/* User passes the address to write the new offset
			 * back to; probe it with a zero write now so a bad
			 * pointer fails before we commit the resize.
			 */
			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			udata->outbuf = (void __user *)
					(unsigned long)offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}

		spin_lock_irq(&srq->rq.kwq->c_lock);
		/*
		 * validate head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		if (udata) {
			/* User ring: indices live in shared UAPI memory. */
			owq = srq->rq.wq;
			head = RDMA_READ_UAPI_ATOMIC(owq->head);
			tail = RDMA_READ_UAPI_ATOMIC(owq->tail);
		} else {
			/* Kernel ring: indices are plain fields. */
			okwq = srq->rq.kwq;
			head = okwq->head;
			tail = okwq->tail;
		}
		if (head >= srq->rq.size || tail >= srq->rq.size) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		/* n = number of WQEs currently queued (head - tail, wrapped). */
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		/* New ring must be strictly larger than the live contents. */
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		/* Copy the live WQEs, in order, into the new ring. */
		n = 0;
		p = tmp_rq.kwq->curr_wq;
		while (tail != head) {
			struct rvt_rwqe *wqe;
			int i;

			wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			/* WQEs are variable-sized; step by sz bytes. */
			p = (struct rvt_rwqe *)((char *)p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
		/* Swap the new ring in; old pointers remain in owq/okwq. */
		srq->rq.kwq = tmp_rq.kwq;
		if (udata) {
			srq->rq.wq = tmp_rq.wq;
			RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->head, n);
			RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->tail, 0);
		} else {
			tmp_rq.kwq->head = n;
			tmp_rq.kwq->tail = 0;
		}
		srq->rq.size = size;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		/* NOTE(review): srq->rq.kwq was reassigned above, so this
		 * unlocks the NEW ring's c_lock, not the one acquired at
		 * entry — matches upstream rdmavt; verify against mainline
		 * before changing.
		 */
		spin_unlock_irq(&srq->rq.kwq->c_lock);

		/* Free the superseded rings (only one of these is non-NULL). */
		vfree(owq);
		kvfree(okwq);

		if (srq->ip) {
			struct rvt_mmap_info *ip = srq->ip;
			struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
			u32 s = sizeof(struct rvt_rwq) + size * sz;

			/* Repoint the existing mmap info at the new ring. */
			rvt_update_mmap_info(dev, ip, s, tmp_rq.wq);

			/*
			 * Return the offset to mmap.
			 * See rvt_mmap() for details.
			 */
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					return ret;
			}

			/*
			 * Put user mapping info onto the pending list
			 * unless it already is on the list.
			 */
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.kwq->c_lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.kwq->c_lock);
	}
	return ret;

bail_unlock:
	spin_unlock_irq(&srq->rq.kwq->c_lock);
bail_free:
	rvt_free_rq(&tmp_rq);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) /** rvt_query_srq - query srq data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)  * @ibsrq: srq to query
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)  * @attr: return info in attr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)  * Return: always 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 	attr->max_wr = srq->rq.size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 	attr->max_sge = srq->rq.max_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 	attr->srq_limit = srq->limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)  * rvt_destroy_srq - destory an srq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)  * @ibsrq: srq object to destroy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 	spin_lock(&dev->n_srqs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	dev->n_srqs_allocated--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 	spin_unlock(&dev->n_srqs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 	if (srq->ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 		kref_put(&srq->ip->ref, rvt_release_mmap_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 	kvfree(srq->rq.kwq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) }