Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the OrangePi 5/5B/5+ boards.

The file below is the mlx4 InfiniBand driver's shared receive queue (SRQ) implementation (drivers/infiniband/hw/mlx4/srq.c in the kernel tree).

/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
#include <rdma/uverbs_ioctl.h>

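/* Return a pointer to the n-th WQE (each WQE is 1 << wqe_shift bytes). */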
static void *get_wqe(struct mlx4_ib_srq *srq, int n)
{
	return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}

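/*
 * Translate a low-level mlx4 SRQ event (limit reached or catastrophic
 * error) into the corresponding IB event and hand it to the consumer's
 * event handler, if one was registered.
 */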
static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device      = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			pr_warn("Unexpected event type %d on SRQ %06x\n",
				type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

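/*
 * Create a shared receive queue.  For a userspace consumer (udata is
 * non-NULL) the WQE buffer and doorbell record live in user memory and
 * only need to be mapped for the HCA; for a kernel consumer they are
 * allocated and initialized here.
 */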
int mlx4_ib_create_srq(struct ib_srq *ib_srq,
		       struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ib_srq->device);
	struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);
	struct mlx4_ib_srq *srq = to_msrq(ib_srq);
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scatter;
	u32 cqn;
	u16 xrcdn;
	int desc_size;
	int buf_size;
	int err;
	int i;

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr  >= dev->dev->caps.max_srq_wqes ||
	    init_attr->attr.max_sge >  dev->dev->caps.max_srq_sge)
		return -EINVAL;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	/*
	 * Each WQE holds a next-WQE segment plus max_gs scatter entries,
	 * rounded up to a power-of-two size of at least 32 bytes.
	 */
	desc_size = max(32UL,
			roundup_pow_of_two(sizeof(struct mlx4_wqe_srq_next_seg) +
					   srq->msrq.max_gs *
					   sizeof(struct mlx4_wqe_data_seg)));
	srq->msrq.wqe_shift = ilog2(desc_size);

	buf_size = srq->msrq.max * desc_size;

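	/*
	 * Userspace owns the WQE buffer: pin the user memory, build the
	 * MTT so the HCA can address it, and map the user doorbell page.
	 */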
	if (udata) {
		struct mlx4_ib_create_srq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
			return -EFAULT;

		srq->umem =
			ib_umem_get(ib_srq->device, ucmd.buf_addr, buf_size, 0);
		if (IS_ERR(srq->umem))
			return PTR_ERR(srq->umem);

		err = mlx4_mtt_init(
			dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE),
			PAGE_SHIFT, &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
		if (err)
			goto err_mtt;

		err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db);
		if (err)
			goto err_mtt;
	} else {
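		/*
		 * Kernel consumer: allocate the doorbell record and the
		 * WQE buffer ourselves.
		 */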
		err = mlx4_db_alloc(dev->dev, &srq->db, 0);
		if (err)
			return err;

		*srq->db.db = 0;

		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
				   &srq->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		srq->head    = 0;
		srq->tail    = srq->msrq.max - 1;
		srq->wqe_ctr = 0;

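		/*
		 * Chain all WQEs into a circular free list and mark every
		 * scatter entry invalid so the HCA ignores unused entries.
		 */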
		for (i = 0; i < srq->msrq.max; ++i) {
			next = get_wqe(srq, i);
			next->next_wqe_index =
				cpu_to_be16((i + 1) & (srq->msrq.max - 1));

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + desc_size;
			     ++scatter)
				scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY);
		}

		err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
				    &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
		if (err)
			goto err_mtt;

		srq->wrid = kvmalloc_array(srq->msrq.max,
					   sizeof(u64), GFP_KERNEL);
		if (!srq->wrid) {
			err = -ENOMEM;
			goto err_mtt;
		}
	}

	cqn = ib_srq_has_cq(init_attr->srq_type) ?
		to_mcq(init_attr->ext.cq)->mcq.cqn : 0;
	xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
		to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn :
		(u16) dev->dev->caps.reserved_xrcds;
	err = mlx4_srq_alloc(dev->dev, to_mpd(ib_srq->pd)->pdn, cqn, xrcdn,
			     &srq->mtt, srq->db.dma, &srq->msrq);
	if (err)
		goto err_wrid;

	srq->msrq.event = mlx4_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (udata)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_wrid;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return 0;

err_wrid:
	if (udata)
		mlx4_ib_db_unmap_user(ucontext, &srq->db);
	else
		kvfree(srq->wrid);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &srq->mtt);

err_buf:
	if (!srq->umem)
		mlx4_buf_free(dev->dev, buf_size, &srq->buf);
	ib_umem_release(srq->umem);

err_db:
	if (!udata)
		mlx4_db_free(dev->dev, &srq->db);

	return err;
}

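/*
 * Modify SRQ attributes.  Only re-arming the limit event (IB_SRQ_LIMIT)
 * is supported; resizing via IB_SRQ_MAX_WR is rejected.
 */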
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
	}

	return 0;
}

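/* Query the SRQ's limit watermark and report its capacity to the caller. */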
int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;
	int limit_watermark;

	ret = mlx4_srq_query(dev->dev, &srq->msrq, &limit_watermark);
	if (ret)
		return ret;

	srq_attr->srq_limit = limit_watermark;
	srq_attr->max_wr    = srq->msrq.max - 1;
	srq_attr->max_sge   = srq->msrq.max_gs;

	return 0;
}

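/*
 * Destroy the SRQ: free the hardware object and its MTT, then release
 * the doorbell and WQE buffer (user mappings for a userspace consumer,
 * kernel allocations otherwise).
 */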
int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(srq->device);
	struct mlx4_ib_srq *msrq = to_msrq(srq);

	mlx4_srq_free(dev->dev, &msrq->msrq);
	mlx4_mtt_cleanup(dev->dev, &msrq->mtt);

	if (udata) {
		mlx4_ib_db_unmap_user(
			rdma_udata_to_drv_context(
				udata,
				struct mlx4_ib_ucontext,
				ibucontext),
			&msrq->db);
	} else {
		kvfree(msrq->wrid);
		mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
			      &msrq->buf);
		mlx4_db_free(dev->dev, &msrq->db);
	}
	ib_umem_release(msrq->umem);
	return 0;
}

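/* Put a completed WQE back on the SRQ free list so it can be reposted. */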
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
{
	struct mlx4_wqe_srq_next_seg *next;

	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}

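/*
 * Post a chain of receive work requests.  Each WR consumes the WQE at
 * the head of the free list; once all WRs are written, a single doorbell
 * record update hands the new WQEs to the hardware.
 */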
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);

	spin_lock_irqsave(&srq->lock, flags);
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			/* SRQ is full: no free WQEs left to post into. */
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next      = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat      = (struct mlx4_wqe_data_seg *) (next + 1);

		for (i = 0; i < wr->num_sge; ++i) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
		}

		/* Terminate a short scatter list with an invalid-lkey entry. */
		if (i < srq->msrq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr       = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}
out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}