Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <rdma/uverbs_ioctl.h>

#include "iw_cxgb4.h"

static int db_delay_usecs = 1;
module_param(db_delay_usecs, int, 0644);
MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");

static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

int db_fc_threshold = 1000;
module_param(db_fc_threshold, int, 0644);
MODULE_PARM_DESC(db_fc_threshold,
		 "QP count/threshold that triggers"
		 " automatic db flow control mode (default = 1000)");

int db_coalescing_threshold;
module_param(db_coalescing_threshold, int, 0644);
MODULE_PARM_DESC(db_coalescing_threshold,
		 "QP count/threshold that triggers"
		 " disabling db coalescing (default = 0)");

static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
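/*
 * Note: the parameters above all use mode 0644, so they can be set at
 * load time (e.g. "modprobe iw_cxgb4 db_delay_usecs=2"; the value here is
 * purely illustrative) and changed at runtime through
 * /sys/module/iw_cxgb4/parameters/<name>.
 */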

static int alloc_ird(struct c4iw_dev *dev, u32 ird)
{
	int ret = 0;

	xa_lock_irq(&dev->qps);
	if (ird <= dev->avail_ird)
		dev->avail_ird -= ird;
	else
		ret = -ENOMEM;
	xa_unlock_irq(&dev->qps);

	if (ret)
		dev_warn(&dev->rdev.lldi.pdev->dev,
			 "device IRD resources exhausted\n");

	return ret;
}

static void free_ird(struct c4iw_dev *dev, int ird)
{
	xa_lock_irq(&dev->qps);
	dev->avail_ird += ird;
	xa_unlock_irq(&dev->qps);
}

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;

	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}

static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
}

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
			  dma_unmap_addr(sq, mapping));
}

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (t4_sq_onchip(sq))
		dealloc_oc_sq(rdev, sq);
	else
		dealloc_host_sq(rdev, sq);
}

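/*
 * Carve the SQ out of the adapter's on-chip queue (OCQP) memory. The
 * host-visible physical/virtual addresses are derived by offsetting into
 * the memory window (oc_mw_pa/oc_mw_kva) that maps the OCQP region.
 */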
static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (!ocqp_support || !ocqp_supported(&rdev->lldi))
		return -ENOSYS;
	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
	if (!sq->dma_addr)
		return -ENOMEM;
	sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
			rdev->lldi.vr->ocq.start;
	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
					    rdev->lldi.vr->ocq.start);
	sq->flags |= T4_SQ_ONCHIP;
	return 0;
}

static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
				       &(sq->dma_addr), GFP_KERNEL);
	if (!sq->queue)
		return -ENOMEM;
	sq->phys_addr = virt_to_phys(sq->queue);
	dma_unmap_addr_set(sq, mapping, sq->dma_addr);
	return 0;
}

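/*
 * Prefer an on-chip SQ for user QPs; fall back to host memory if on-chip
 * allocation is unsupported or exhausted.
 */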
static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
{
	int ret = -ENOSYS;

	if (user)
		ret = alloc_oc_sq(rdev, sq);
	if (ret)
		ret = alloc_host_sq(rdev, sq);
	return ret;
}

static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx, int has_rq)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dealloc_sq(rdev, &wq->sq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);

	if (has_rq) {
		dma_free_coherent(&rdev->lldi.pdev->dev,
				  wq->rq.memsize, wq->rq.queue,
				  dma_unmap_addr(&wq->rq, mapping));
		c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
		kfree(wq->rq.sw_rq);
		c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	}
	return 0;
}

/*
 * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL,
 * then this is a user mapping so compute the page-aligned physical address
 * for mapping.
 */
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
			      enum cxgb4_bar2_qtype qtype,
			      unsigned int *pbar2_qid, u64 *pbar2_pa)
{
	u64 bar2_qoffset;
	int ret;

	ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype,
				   pbar2_pa ? 1 : 0,
				   &bar2_qoffset, pbar2_qid);
	if (ret)
		return NULL;

	if (pbar2_pa)
		*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;

	if (is_t4(rdev->lldi.adapter_type))
		return NULL;

	return rdev->bar2_kva + bar2_qoffset;
}

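/*
 * Allocate the host resources for a QP (qids, shadow queues, RQT entries,
 * DMA queue memory, BAR2 doorbell mappings), then describe the new EQs to
 * the firmware with a single FW_RI_RES_WR and wait for its completion.
 */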
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx,
		     struct c4iw_wr_wait *wr_waitp,
		     int need_rq)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct sk_buff *skb;
	int ret = 0;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	if (need_rq) {
		wq->rq.qid = c4iw_get_qpid(rdev, uctx);
		if (!wq->rq.qid) {
			ret = -ENOMEM;
			goto free_sq_qid;
		}
	}

	if (!user) {
		wq->sq.sw_sq = kcalloc(wq->sq.size, sizeof(*wq->sq.sw_sq),
				       GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid; /* FIXME */
		}

		if (need_rq) {
			wq->rq.sw_rq = kcalloc(wq->rq.size,
					       sizeof(*wq->rq.sw_rq),
					       GFP_KERNEL);
			if (!wq->rq.sw_rq) {
				ret = -ENOMEM;
				goto free_sw_sq;
			}
		}
	}

	if (need_rq) {
		/*
		 * RQT must be a power of 2 and at least 16 deep.
		 */
		wq->rq.rqt_size =
			roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
		wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
		if (!wq->rq.rqt_hwaddr) {
			ret = -ENOMEM;
			goto free_sw_rq;
		}
	}

	ret = alloc_sq(rdev, &wq->sq, user);
	if (ret)
		goto free_hwaddr;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	if (need_rq) {
		wq->rq.queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
						  wq->rq.memsize,
						  &wq->rq.dma_addr,
						  GFP_KERNEL);
		if (!wq->rq.queue) {
			ret = -ENOMEM;
			goto free_sq;
		}
		pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
			 wq->sq.queue,
			 (unsigned long long)virt_to_phys(wq->sq.queue),
			 wq->rq.queue,
			 (unsigned long long)virt_to_phys(wq->rq.queue));
		dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
	}

	wq->db = rdev->lldi.db_reg;

	wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid,
					 CXGB4_BAR2_QTYPE_EGRESS,
					 &wq->sq.bar2_qid,
					 user ? &wq->sq.bar2_pa : NULL);
	if (need_rq)
		wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid,
						 CXGB4_BAR2_QTYPE_EGRESS,
						 &wq->rq.bar2_qid,
						 user ? &wq->rq.bar2_pa : NULL);

	/*
	 * User mode must have bar2 access.
	 */
	if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
		pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
			pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
		ret = -EINVAL;
		goto free_dma;
	}

	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof(*res_wr) + 2 * sizeof(*res);
	if (need_rq)
		wr_len += sizeof(*res);
	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = __skb_put_zero(skb, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(need_rq ? 2 : 1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
		rdev->hw_queue.t4_eq_status_entries;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		FW_RI_RES_WR_HOSTFCMODE_V(0) |	/* no host cidx updates */
		FW_RI_RES_WR_CPRIO_V(0) |	/* don't keep in chip cache */
		FW_RI_RES_WR_PCIECHN_V(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
		FW_RI_RES_WR_IQID_V(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		FW_RI_RES_WR_DCAEN_V(0) |
		FW_RI_RES_WR_DCACPU_V(0) |
		FW_RI_RES_WR_FBMIN_V(2) |
		(t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
					 FW_RI_RES_WR_FBMAX_V(3)) |
		FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
		FW_RI_RES_WR_CIDXFTHRESH_V(0) |
		FW_RI_RES_WR_EQSIZE_V(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);

	if (need_rq) {
		res++;
		res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
		res->u.sqrq.op = FW_RI_RES_OP_WRITE;

		/*
		 * eqsize is the number of 64B entries plus the status page size
		 */
		eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
			rdev->hw_queue.t4_eq_status_entries;
		res->u.sqrq.fetchszm_to_iqid =
			/* no host cidx updates */
			cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
			/* don't keep in chip cache */
			FW_RI_RES_WR_CPRIO_V(0) |
			/* set by uP at ri_init time */
			FW_RI_RES_WR_PCIECHN_V(0) |
			FW_RI_RES_WR_IQID_V(rcq->cqid));
		res->u.sqrq.dcaen_to_eqsize =
			cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
			FW_RI_RES_WR_DCACPU_V(0) |
			FW_RI_RES_WR_FBMIN_V(2) |
			FW_RI_RES_WR_FBMAX_V(3) |
			FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
			FW_RI_RES_WR_CIDXFTHRESH_V(0) |
			FW_RI_RES_WR_EQSIZE_V(eqsize));
		res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
		res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
	}

	c4iw_init_wr_wait(wr_waitp);
	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;

	pr_debug("sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
		 wq->sq.qid, wq->rq.qid, wq->db,
		 wq->sq.bar2_va, wq->rq.bar2_va);

	return 0;
free_dma:
	if (need_rq)
		dma_free_coherent(&rdev->lldi.pdev->dev,
				  wq->rq.memsize, wq->rq.queue,
				  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	if (need_rq)
		c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	if (need_rq)
		kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	if (need_rq)
		c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}

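/*
 * Copy the send payload directly into the WQE as immediate data, handling
 * wrap-around at the end of the ring and padding the result to a 16B
 * boundary. Fails with -EMSGSIZE if the total length exceeds @max.
 */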
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      const struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof(*immdp), 16) - (plen + sizeof(*immdp));
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}

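/*
 * Write an immediate SGL (one lkey/length flit plus one address flit per
 * SGE) into the queue, wrapping at queue_end. The u32 overflow check on
 * plen rejects SGE lists whose total length cannot be represented.
 */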
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp;

	if ((__be64 *)isglp == queue_end)
		isglp = (struct fw_ri_isgl *)queue_start;

	flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				     sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}

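/*
 * Fill in a SEND/SEND_WITH_INV WQE. Payloads flagged IB_SEND_INLINE are
 * embedded as immediate data; otherwise an ISGL referencing the caller's
 * buffers is built in place.
 */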
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   const struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}
	wqe->send.r3 = 0;
	wqe->send.r4 = 0;

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof(wqe->send) + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof(wqe->send) + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof(wqe->send) + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

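/*
 * Fill in an RDMA WRITE WQE. Same inline-vs-ISGL handling as
 * build_rdma_send(), plus the sink rkey/address and optional 32-bit
 * immediate data.
 */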
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    const struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;

	/*
	 * iWARP protocol supports 64 bit immediate data but rdma api
	 * limits it to 32bit.
	 */
	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wqe->write.iw_imm_data.ib_imm_data.imm_data32 = wr->ex.imm_data;
	else
		wqe->write.iw_imm_data.ib_imm_data.imm_data32 = 0;
	wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
	wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof(wqe->write) + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof(wqe->write) + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof(wqe->write) + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

static void build_immd_cmpl(struct t4_sq *sq, struct fw_ri_immd_cmpl *immdp,
			    struct ib_send_wr *wr)
{
	memcpy((u8 *)immdp->data, (u8 *)(uintptr_t)wr->sg_list->addr, 16);
	memset(immdp->r1, 0, 6);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->immdlen = 16;
}

static void build_rdma_write_cmpl(struct t4_sq *sq,
				  struct fw_ri_rdma_write_cmpl_wr *wcwr,
				  const struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;

	/*
	 * This code assumes the struct fields preceding the write isgl
	 * fit in one 64B WR slot.  This is because the WQE is built
	 * directly in the dma queue, and wrapping is only handled
	 * by the code building sgls.  I.e. the "fixed part" of the wr
	 * structs must all fit in 64B.  The WQE build code should probably be
	 * redesigned to avoid this restriction, but for now just add
	 * the BUILD_BUG_ON() to catch if this WQE struct gets too big.
	 */
	BUILD_BUG_ON(offsetof(struct fw_ri_rdma_write_cmpl_wr, u) > 64);

	wcwr->stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
	wcwr->to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
	if (wr->next->opcode == IB_WR_SEND)
		wcwr->stag_inv = 0;
	else
		wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey);
	wcwr->r2 = 0;
	wcwr->r3 = 0;

	/* SEND_INV SGL */
	if (wr->next->send_flags & IB_SEND_INLINE)
		build_immd_cmpl(sq, &wcwr->u_cmpl.immd_src, wr->next);
	else
		build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
			   &wcwr->u_cmpl.isgl_src, wr->next->sg_list, 1, NULL);

	/* WRITE SGL */
	build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
		   wcwr->u.isgl_src, wr->sg_list, wr->num_sge, &plen);

	size = sizeof(*wcwr) + sizeof(struct fw_ri_isgl) +
		wr->num_sge * sizeof(struct fw_ri_sge);
	wcwr->plen = cpu_to_be32(plen);
	*len16 = DIV_ROUND_UP(size, 16);
}

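/*
 * Fill in an RDMA READ WQE. At most one sink SGE is supported; a
 * zero-length read is still posted, with dummy STAG values (presumably so
 * the firmware sees a well-formed WR).
 */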
static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr,
			   u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge && wr->sg_list[0].length) {
		wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							 >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof(wqe->read), 16);
	return 0;
}

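/*
 * Post a chained WRITE + SEND(_WITH_INV) pair as one fused
 * FW_RI_RDMA_WRITE_CMPL_WR. Two sw_sq entries are still consumed so that
 * each original WR gets its own completion.
 */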
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	bool send_signaled = (wr->next->send_flags & IB_SEND_SIGNALED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 			     qhp->sq_sig_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	bool write_signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 			      qhp->sq_sig_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	struct t4_swsqe *swsqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	union t4_wr *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	u16 write_wrid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	u8 len16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	 * The sw_sq entries still look like a WRITE and a SEND and consume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	 * 2 slots. The FW WR, however, will be a single uber-WR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	       qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	/* WRITE swsqe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	swsqe->opcode = FW_RI_RDMA_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	swsqe->idx = qhp->wq.sq.pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	swsqe->complete = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	swsqe->signaled = write_signaled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	swsqe->flushed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	swsqe->wr_id = wr->wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	if (c4iw_wr_log) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		swsqe->sge_ts =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 			cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		swsqe->host_time = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	write_wrid = qhp->wq.sq.pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	/* just bump the sw_sq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	qhp->wq.sq.in_use++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	if (++qhp->wq.sq.pidx == qhp->wq.sq.size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		qhp->wq.sq.pidx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	/* SEND_WITH_INV swsqe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	if (wr->next->opcode == IB_WR_SEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		swsqe->opcode = FW_RI_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		swsqe->opcode = FW_RI_SEND_WITH_INV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	swsqe->idx = qhp->wq.sq.pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	swsqe->complete = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	swsqe->signaled = send_signaled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	swsqe->flushed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	swsqe->wr_id = wr->next->wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	if (c4iw_wr_log) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		swsqe->sge_ts =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 			cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		swsqe->host_time = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	wqe->write_cmpl.flags_send = send_signaled ? FW_RI_COMPLETION_FLAG : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	wqe->write_cmpl.wrid_send = qhp->wq.sq.pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	init_wr_hdr(wqe, write_wrid, FW_RI_RDMA_WRITE_CMPL_WR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		    write_signaled ? FW_RI_COMPLETION_FLAG : 0, len16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	t4_sq_produce(&qhp->wq, len16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	idx = DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	t4_ring_sq_db(&qhp->wq, idx, wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 			   const struct ib_recv_wr *wr, u8 *len16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	*len16 = DIV_ROUND_UP(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		sizeof(wqe->recv) + wr->num_sge * sizeof(struct fw_ri_sge), 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
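/*
 * build_srq_recv - build an SRQ work request in a caller-provided buffer.
 * Unlike build_rdma_recv(), the WR is staged outside the ring (see
 * c4iw_post_srq_recv()), so no queue-wrap handling is needed here.
 */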
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) static int build_srq_recv(union t4_recv_wr *wqe, const struct ib_recv_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 			  u8 *len16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	ret = build_isgl((__be64 *)wqe, (__be64 *)(wqe + 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	*len16 = DIV_ROUND_UP(sizeof(wqe->recv) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
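/*
 * build_tpte_memreg - build a FW_RI_FR_NSMR_TPTE_WR fast-register WR that
 * carries the complete TPT entry plus up to two PBL entries inline,
 * avoiding a separate TPT/PBL write.  Only used when the low-level driver
 * advertises fr_nsmr_tpte_wr_support and the MR has mpl_len <= 2.
 */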
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			      const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 			      u8 *len16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	__be64 *p = (__be64 *)fr->pbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	fr->r2 = cpu_to_be32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	fr->stag = cpu_to_be32(mhp->ibmr.rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	fr->tpte.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		FW_RI_TPTE_STAGKEY_V((mhp->ibmr.rkey & FW_RI_TPTE_STAGKEY_M)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		FW_RI_TPTE_STAGSTATE_V(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		FW_RI_TPTE_STAGTYPE_V(FW_RI_STAG_NSMR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		FW_RI_TPTE_PDID_V(mhp->attr.pdid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	fr->tpte.locread_to_qpid = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		FW_RI_TPTE_ADDRTYPE_V(FW_RI_VA_BASED_TO) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	fr->tpte.nosnoop_pbladdr = cpu_to_be32(FW_RI_TPTE_PBLADDR_V(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	fr->tpte.len_hi = cpu_to_be32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	p[0] = cpu_to_be64((u64)mhp->mpl[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	p[1] = cpu_to_be64((u64)mhp->mpl[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	*len16 = DIV_ROUND_UP(sizeof(*fr), 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
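/*
 * build_memreg - build a FW_RI_FR_NSMR_WR fast-register WR.  The PBL is
 * appended either as a DSGL (the hardware DMA-reads it from memory) or as
 * immediate data copied into the SQ, padded out to the 32-byte-rounded
 * PBL length and wrapped around the end of the queue as needed.
 */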
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 			const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			u8 *len16, bool dsgl_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	struct fw_ri_immd *imdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	__be64 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	int rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	wqe->fr.qpbinde_to_dcacpu = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	wqe->fr.len_hi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	wqe->fr.stag = cpu_to_be32(wr->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 					0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		struct fw_ri_dsgl *sglp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		for (i = 0; i < mhp->mpl_len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 			mhp->mpl[i] = (__force u64)cpu_to_be64((u64)mhp->mpl[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		sglp->op = FW_RI_DATA_DSGL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		sglp->r1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		sglp->nsge = cpu_to_be16(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		sglp->addr0 = cpu_to_be64(mhp->mpl_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		sglp->len0 = cpu_to_be32(pbllen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		imdp->op = FW_RI_DATA_IMMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		imdp->r1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		imdp->r2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		imdp->immdlen = cpu_to_be32(pbllen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		p = (__be64 *)(imdp + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		rem = pbllen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		for (i = 0; i < mhp->mpl_len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 			*p = cpu_to_be64((u64)mhp->mpl[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 			rem -= sizeof(*p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			if (++p == (__be64 *)&sq->queue[sq->size])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 				p = (__be64 *)sq->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		while (rem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 			*p = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			rem -= sizeof(*p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 			if (++p == (__be64 *)&sq->queue[sq->size])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 				p = (__be64 *)sq->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 				      + pbllen, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
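/* build_inv_stag - build a local-invalidate WR for the given rkey. */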
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			  u8 *len16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	wqe->inv.r2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	*len16 = DIV_ROUND_UP(sizeof(wqe->inv), 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
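/* QP reference counting: the final c4iw_qp_rem_ref() completes qp_rel_comp. */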
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) void c4iw_qp_add_ref(struct ib_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	pr_debug("ib_qp %p\n", qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	refcount_inc(&to_c4iw_qp(qp)->qp_refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) void c4iw_qp_rem_ref(struct ib_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	pr_debug("ib_qp %p\n", qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	if (refcount_dec_and_test(&to_c4iw_qp(qp)->qp_refcnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		complete(&to_c4iw_qp(qp)->qp_rel_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
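/* Add a qp to the doorbell flow-control list unless it is already on it. */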
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) static void add_to_fc_list(struct list_head *head, struct list_head *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	if (list_empty(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		list_add_tail(entry, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
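/*
 * ring_kernel_sq_db - ring the SQ doorbell for a kernel-mode qp.  If the
 * device is in doorbell-recovery mode (db_state != NORMAL), the pidx
 * increment is accumulated in wq_pidx_inc and the qp is queued on the
 * flow-control list so the doorbell can be replayed later instead.
 * ring_kernel_rq_db() below is the RQ counterpart.
 */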
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	xa_lock_irqsave(&qhp->rhp->qps, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	spin_lock(&qhp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	if (qhp->rhp->db_state == NORMAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		t4_ring_sq_db(&qhp->wq, inc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		qhp->wq.sq.wq_pidx_inc += inc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	spin_unlock(&qhp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	xa_unlock_irqrestore(&qhp->rhp->qps, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	xa_lock_irqsave(&qhp->rhp->qps, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	spin_lock(&qhp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	if (qhp->rhp->db_state == NORMAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		t4_ring_rq_db(&qhp->wq, inc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		qhp->wq.rq.wq_pidx_inc += inc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	spin_unlock(&qhp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	xa_unlock_irqrestore(&qhp->rhp->qps, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
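/* Map an ib_wr_opcode to the matching FW_RI opcode, or -EINVAL. */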
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) static int ib_to_fw_opcode(int ib_opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	int opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	switch (ib_opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	case IB_WR_SEND_WITH_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		opcode = FW_RI_SEND_WITH_INV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	case IB_WR_SEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		opcode = FW_RI_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	case IB_WR_RDMA_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		opcode = FW_RI_RDMA_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	case IB_WR_RDMA_WRITE_WITH_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		opcode = FW_RI_WRITE_IMMEDIATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	case IB_WR_RDMA_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	case IB_WR_RDMA_READ_WITH_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		opcode = FW_RI_READ_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	case IB_WR_REG_MR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		opcode = FW_RI_FAST_REGISTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	case IB_WR_LOCAL_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		opcode = FW_RI_LOCAL_INV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		opcode = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	return opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
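/*
 * complete_sq_drain_wr - the qp is already flushed, so instead of posting
 * the WR to hardware, synthesize a software "drain" CQE with status
 * T4_ERR_SWFLUSH directly into the send CQ's software queue and, if the
 * CQ was armed, invoke its completion handler.
 */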
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) static int complete_sq_drain_wr(struct c4iw_qp *qhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 				const struct ib_send_wr *wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	struct t4_cqe cqe = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	struct c4iw_cq *schp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	unsigned long flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	struct t4_cq *cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	int opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	schp = to_c4iw_cq(qhp->ibqp.send_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	cq = &schp->cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	opcode = ib_to_fw_opcode(wr->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	if (opcode < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		return opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	cqe.u.drain_cookie = wr->wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 				 CQE_OPCODE_V(opcode) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 				 CQE_TYPE_V(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 				 CQE_SWCQE_V(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 				 CQE_DRAIN_V(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 				 CQE_QPID_V(qhp->wq.sq.qid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	spin_lock_irqsave(&schp->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	cq->sw_queue[cq->sw_pidx] = cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	t4_swcq_produce(cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	spin_unlock_irqrestore(&schp->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	if (t4_clear_cq_armed(&schp->cq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		spin_lock_irqsave(&schp->comp_handler_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		(*schp->ibcq.comp_handler)(&schp->ibcq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 					   schp->ibcq.cq_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) static int complete_sq_drain_wrs(struct c4iw_qp *qhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 				 const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 				 const struct ib_send_wr **bad_wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	while (wr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		ret = complete_sq_drain_wr(qhp, wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 			*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		wr = wr->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
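/*
 * complete_rq_drain_wr - recv-side counterpart of complete_sq_drain_wr():
 * insert a software drain CQE into the recv CQ for a flushed qp.
 */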
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) static void complete_rq_drain_wr(struct c4iw_qp *qhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 				 const struct ib_recv_wr *wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	struct t4_cqe cqe = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	struct c4iw_cq *rchp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	unsigned long flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	struct t4_cq *cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	cq = &rchp->cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	cqe.u.drain_cookie = wr->wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 				 CQE_OPCODE_V(FW_RI_SEND) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 				 CQE_TYPE_V(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 				 CQE_SWCQE_V(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 				 CQE_DRAIN_V(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 				 CQE_QPID_V(qhp->wq.sq.qid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	spin_lock_irqsave(&rchp->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	cq->sw_queue[cq->sw_pidx] = cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	t4_swcq_produce(cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	spin_unlock_irqrestore(&rchp->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	if (t4_clear_cq_armed(&rchp->cq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		(*rchp->ibcq.comp_handler)(&rchp->ibcq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 					   rchp->ibcq.cq_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) static void complete_rq_drain_wrs(struct c4iw_qp *qhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 				  const struct ib_recv_wr *wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	while (wr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		complete_rq_drain_wr(qhp, wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		wr = wr->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
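/*
 * c4iw_post_send - post a chain of send work requests to the SQ.
 * Flushed qps get drain CQEs instead; otherwise each WR is translated to
 * its FW form, the sw_sq entry is filled in, and the doorbell is rung
 * once for the whole chain (directly, or via ring_kernel_sq_db() when
 * doorbells are off).
 *
 * Illustrative caller sketch (hypothetical ULP code, not part of this
 * driver), assuming an established kernel QP and a registered MR:
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr, .length = len, .lkey = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.opcode = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *		.sg_list = &sge, .num_sge = 1,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr, &bad_wr);
 */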
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		   const struct ib_send_wr **bad_wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	u8 len16 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	enum fw_wr_opcodes fw_opcode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	enum fw_ri_wr_flags fw_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	struct c4iw_qp *qhp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	struct c4iw_dev *rhp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	union t4_wr *wqe = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	u32 num_wrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	struct t4_swsqe *swsqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	unsigned long flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	u16 idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	qhp = to_c4iw_qp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	rhp = qhp->rhp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	spin_lock_irqsave(&qhp->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	 * If the qp has been flushed, then just insert a special
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	 * drain cqe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	if (qhp->wq.flushed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		spin_unlock_irqrestore(&qhp->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		err = complete_sq_drain_wrs(qhp, wr, bad_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	num_wrs = t4_sq_avail(&qhp->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	if (num_wrs == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		spin_unlock_irqrestore(&qhp->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	 * Fastpath for an NVMe-oF target WRITE + SEND_WITH_INV wr chain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	 * which is the response for small NVMe-oF READ requests.  If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	 * chain is exactly a WRITE->SEND_WITH_INV or a WRITE->SEND and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	 * sgl depths and lengths meet the requirements of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	 * fw_ri_write_cmpl_wr work request, then build and post the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	 * write_cmpl WR.  If any of the tests below are not true, then we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	 * continue on with the traditional WRITE and SEND WRs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	if (qhp->rhp->rdev.lldi.write_cmpl_support &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	    CHELSIO_CHIP_VERSION(qhp->rhp->rdev.lldi.adapter_type) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	    CHELSIO_T5 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	    wr && wr->next && !wr->next->next &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	    wr->opcode == IB_WR_RDMA_WRITE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	    wr->sg_list[0].length && wr->num_sge <= T4_WRITE_CMPL_MAX_SGL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	    (wr->next->opcode == IB_WR_SEND ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	    wr->next->opcode == IB_WR_SEND_WITH_INV) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	    wr->next->sg_list[0].length == T4_WRITE_CMPL_MAX_CQE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	    wr->next->num_sge == 1 && num_wrs >= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		post_write_cmpl(qhp, wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		spin_unlock_irqrestore(&qhp->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	while (wr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		if (num_wrs == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		fw_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		if (wr->send_flags & IB_SEND_SOLICITED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 			fw_flags |= FW_RI_COMPLETION_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		switch (wr->opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		case IB_WR_SEND_WITH_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		case IB_WR_SEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 			if (wr->send_flags & IB_SEND_FENCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 				fw_flags |= FW_RI_READ_FENCE_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 			fw_opcode = FW_RI_SEND_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 			if (wr->opcode == IB_WR_SEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 				swsqe->opcode = FW_RI_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 				swsqe->opcode = FW_RI_SEND_WITH_INV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		case IB_WR_RDMA_WRITE_WITH_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 			if (unlikely(!rhp->rdev.lldi.write_w_imm_support)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 				err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			fw_flags |= FW_RI_RDMA_WRITE_WITH_IMMEDIATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		case IB_WR_RDMA_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 			fw_opcode = FW_RI_RDMA_WRITE_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 			swsqe->opcode = FW_RI_RDMA_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		case IB_WR_RDMA_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		case IB_WR_RDMA_READ_WITH_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 			fw_opcode = FW_RI_RDMA_READ_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 			swsqe->opcode = FW_RI_READ_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 				c4iw_invalidate_mr(rhp, wr->sg_list[0].lkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 				fw_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 			err = build_rdma_read(wqe, wr, &len16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 			swsqe->read_len = wr->sg_list[0].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 			if (!qhp->wq.sq.oldest_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 				qhp->wq.sq.oldest_read = swsqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		case IB_WR_REG_MR: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 			struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 			swsqe->opcode = FW_RI_FAST_REGISTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 			if (rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 			    !mhp->attr.state && mhp->mpl_len <= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 				fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 				build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 						  mhp, &len16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 				fw_opcode = FW_RI_FR_NSMR_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 				err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 				       mhp, &len16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 				       rhp->rdev.lldi.ulptx_memwrite_dsgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 				if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 			mhp->attr.state = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		case IB_WR_LOCAL_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 			if (wr->send_flags & IB_SEND_FENCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 			fw_opcode = FW_RI_INV_LSTAG_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 			swsqe->opcode = FW_RI_LOCAL_INV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 			err = build_inv_stag(wqe, wr, &len16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 			c4iw_invalidate_mr(rhp, wr->ex.invalidate_rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 			pr_warn("%s post of type=%d TBD!\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 				wr->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 			*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		swsqe->idx = qhp->wq.sq.pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		swsqe->complete = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 				  qhp->sq_sig_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		swsqe->flushed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		swsqe->wr_id = wr->wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		if (c4iw_wr_log) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 			swsqe->sge_ts = cxgb4_read_sge_timestamp(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 					rhp->rdev.lldi.ports[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 			swsqe->host_time = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		pr_debug("cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 			 (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 			 swsqe->opcode, swsqe->read_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		wr = wr->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		num_wrs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		t4_sq_produce(&qhp->wq, len16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	if (!rhp->rdev.status_page->db_off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		t4_ring_sq_db(&qhp->wq, idx, wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		spin_unlock_irqrestore(&qhp->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		spin_unlock_irqrestore(&qhp->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		ring_kernel_sq_db(qhp, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
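/*
 * c4iw_post_receive - post a chain of receive work requests to the RQ.
 * Mirrors c4iw_post_send(): drain CQEs for flushed qps, a per-WR build of
 * the FW recv WR in the ring, and one doorbell ring for the whole chain.
 */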
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		      const struct ib_recv_wr **bad_wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	struct c4iw_qp *qhp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	union t4_recv_wr *wqe = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	u32 num_wrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	u8 len16 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	unsigned long flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	u16 idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	qhp = to_c4iw_qp(ibqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	spin_lock_irqsave(&qhp->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	 * If the qp has been flushed, then just insert a special
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	 * drain cqe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	if (qhp->wq.flushed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		spin_unlock_irqrestore(&qhp->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		complete_rq_drain_wrs(qhp, wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	num_wrs = t4_rq_avail(&qhp->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	if (num_wrs == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		spin_unlock_irqrestore(&qhp->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	while (wr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		if (wr->num_sge > T4_MAX_RECV_SGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 			*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 					   qhp->wq.rq.wq_pidx *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 					   T4_EQ_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		if (num_wrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 			err = build_rdma_recv(qhp, wqe, wr, &len16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 			*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		if (c4iw_wr_log) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 			qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 				cxgb4_read_sge_timestamp(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 						qhp->rhp->rdev.lldi.ports[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 			qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 				ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		wqe->recv.opcode = FW_RI_RECV_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		wqe->recv.r1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		wqe->recv.wrid = qhp->wq.rq.pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		wqe->recv.r2[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		wqe->recv.r2[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		wqe->recv.r2[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		wqe->recv.len16 = len16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		pr_debug("cookie 0x%llx pidx %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 			 (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		t4_rq_produce(&qhp->wq, len16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		wr = wr->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		num_wrs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	if (!qhp->rhp->rdev.status_page->db_off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		t4_ring_rq_db(&qhp->wq, idx, wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		spin_unlock_irqrestore(&qhp->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		spin_unlock_irqrestore(&qhp->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		ring_kernel_rq_db(qhp, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
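/*
 * defer_srq_wr - stash an SRQ WR in the pending-WR FIFO.  Used while the
 * SRQ has out-of-order completions (or older pending WRs) so that WRs are
 * handed to hardware in order once the gaps are resolved.
 */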
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) static void defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 			 u64 wr_id, u8 len16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	struct t4_srq_pending_wr *pwr = &srq->pending_wrs[srq->pending_pidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u ooo_count %u wr_id 0x%llx pending_cidx %u pending_pidx %u pending_in_use %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		 __func__, srq->cidx, srq->pidx, srq->wq_pidx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		 srq->in_use, srq->ooo_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		 (unsigned long long)wr_id, srq->pending_cidx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		 srq->pending_pidx, srq->pending_in_use);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	pwr->wr_id = wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	pwr->len16 = len16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	memcpy(&pwr->wqe, wqe, len16 * 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	t4_srq_produce_pending_wr(srq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 
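/*
 * c4iw_post_srq_recv - post receive WRs to a shared receive queue.  Each
 * WR is staged in a local buffer, then either deferred (see above) or
 * copied into the SRQ ring with its sw_rq slot marked valid.  The SRQ
 * doorbell is rung once at the end if anything was queued directly.
 */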
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		       const struct ib_recv_wr **bad_wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	union t4_recv_wr *wqe, lwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	struct c4iw_srq *srq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	unsigned long flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	u8 len16 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	u16 idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	u32 num_wrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	srq = to_c4iw_srq(ibsrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	spin_lock_irqsave(&srq->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	num_wrs = t4_srq_avail(&srq->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	if (num_wrs == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		spin_unlock_irqrestore(&srq->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	while (wr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		if (wr->num_sge > T4_MAX_RECV_SGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 			*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		wqe = &lwqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		if (num_wrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 			err = build_srq_recv(wqe, wr, &len16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 			*bad_wr = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 		wqe->recv.opcode = FW_RI_RECV_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		wqe->recv.r1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		wqe->recv.wrid = srq->wq.pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		wqe->recv.r2[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		wqe->recv.r2[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		wqe->recv.r2[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		wqe->recv.len16 = len16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		if (srq->wq.ooo_count ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		    srq->wq.pending_in_use ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		    srq->wq.sw_rq[srq->wq.pidx].valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 			defer_srq_wr(&srq->wq, wqe, wr->wr_id, len16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 			srq->wq.sw_rq[srq->wq.pidx].wr_id = wr->wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 			srq->wq.sw_rq[srq->wq.pidx].valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 			c4iw_copy_wr_to_srq(&srq->wq, wqe, len16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 			pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u wr_id 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 				 __func__, srq->wq.cidx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 				 srq->wq.pidx, srq->wq.wq_pidx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 				 srq->wq.in_use,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 				 (unsigned long long)wr->wr_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 			t4_srq_produce(&srq->wq, len16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 			idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		wr = wr->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		num_wrs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	if (idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		t4_ring_srq_db(&srq->wq, idx, len16, wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	spin_unlock_irqrestore(&srq->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
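/*
 * build_term_codes - translate the status and opcode of a hardware error
 * CQE into the iWARP TERMINATE layer/etype and error code to report to
 * the peer, per the RDMAP/DDP/MPA error mappings.  With no CQE, report a
 * local catastrophic RDMAP error.
 */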
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 				    u8 *ecode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	int tagged;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	int opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	int rqtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	int send_inv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	if (!err_cqe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		*ecode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	status = CQE_STATUS(err_cqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	opcode = CQE_OPCODE(err_cqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	rqtype = RQ_TYPE(err_cqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		   (opcode == FW_RI_SEND_WITH_SE_INV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	tagged = (opcode == FW_RI_RDMA_WRITE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 		 (rqtype && (opcode == FW_RI_READ_RESP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	case T4_ERR_STAG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		if (send_inv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 			*ecode = RDMAP_CANT_INV_STAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 			*ecode = RDMAP_INV_STAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	case T4_ERR_PDID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		if ((opcode == FW_RI_SEND_WITH_INV) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		    (opcode == FW_RI_SEND_WITH_SE_INV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 			*ecode = RDMAP_CANT_INV_STAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 			*ecode = RDMAP_STAG_NOT_ASSOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	case T4_ERR_QPID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		*ecode = RDMAP_STAG_NOT_ASSOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	case T4_ERR_ACCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		*ecode = RDMAP_ACC_VIOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	case T4_ERR_WRAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		*ecode = RDMAP_TO_WRAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	case T4_ERR_BOUND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		if (tagged) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 			*ecode = DDPT_BASE_BOUNDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 			*ecode = RDMAP_BASE_BOUNDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	case T4_ERR_INVALIDATE_SHARED_MR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		*ecode = RDMAP_CANT_INV_STAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	case T4_ERR_ECC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	case T4_ERR_ECC_PSTAG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	case T4_ERR_INTERNAL_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		*ecode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	case T4_ERR_OUT_OF_RQE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 		*ecode = DDPU_INV_MSN_NOBUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	case T4_ERR_PBL_ADDR_BOUND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		*ecode = DDPT_BASE_BOUNDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	case T4_ERR_CRC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		*layer_type = LAYER_MPA|DDP_LLP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		*ecode = MPA_CRC_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	case T4_ERR_MARKER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		*layer_type = LAYER_MPA|DDP_LLP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		*ecode = MPA_MARKER_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	case T4_ERR_PDU_LEN_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		*ecode = DDPU_MSG_TOOBIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	case T4_ERR_DDP_VERSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		if (tagged) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 			*ecode = DDPT_INV_VERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 			*ecode = DDPU_INV_VERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	case T4_ERR_RDMA_VERSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		*ecode = RDMAP_INV_VERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	case T4_ERR_OPCODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		*ecode = RDMAP_INV_OPCODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	case T4_ERR_DDP_QUEUE_NUM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		*ecode = DDPU_INV_QN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	case T4_ERR_MSN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	case T4_ERR_MSN_GAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	case T4_ERR_MSN_RANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	case T4_ERR_IRD_OVERFLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		*ecode = DDPU_INV_MSN_RANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	case T4_ERR_TBIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		*ecode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	case T4_ERR_MO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		*ecode = DDPU_INV_MO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		*ecode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
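
/*
 * Decoding sketch (illustrative only, not built): the layer_etype byte
 * produced above packs the terminate "Layer" in its high nibble
 * (LAYER_RDMAP/LAYER_DDP/LAYER_MPA) and the "Error Type" in its low
 * nibble, while ecode carries the per-layer error code, mirroring the
 * iWARP TERMINATE message fields.  The helper name below is
 * hypothetical.
 */
#if 0
static void decode_term_codes(u8 layer_etype, u8 ecode)
{
	u8 layer = layer_etype & 0xf0;	/* e.g. LAYER_DDP */
	u8 etype = layer_etype & 0x0f;	/* e.g. DDP_UNTAGGED_ERR */

	pr_info("TERMINATE layer 0x%x etype 0x%x ecode 0x%x\n",
		layer, etype, ecode);
}
#endif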
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 			   gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	struct fw_ri_wr *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	struct terminate_message *term;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		 qhp->ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	if (WARN_ON(!skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	wqe = __skb_put_zero(skb, sizeof(*wqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	wqe->flowid_len16 = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		FW_WR_FLOWID_V(qhp->ep->hwtid) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	wqe->u.terminate.immdlen = cpu_to_be32(sizeof(*term));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	term = (struct terminate_message *)wqe->u.terminate.termmsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		term->layer_etype = qhp->attr.layer_etype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		term->ecode = qhp->attr.ecode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	c4iw_ofld_send(&qhp->rhp->rdev, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
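
/*
 * Notes on the path above (assumptions flagged): the skb is taken from
 * the endpoint's pre-allocated ep_skb_list precisely so this error path
 * cannot fail on allocation, which also leaves the gfp argument
 * effectively unused here.  The len16 field expresses the WR size in
 * 16-byte units; a sketch (the 64-byte figure is an assumption, not
 * taken from the firmware headers):
 */
#if 0
u32 len16 = DIV_ROUND_UP(sizeof(struct fw_ri_wr), 16);	/* 4 for a 64B WR */
#endif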
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)  * Assumes qhp lock is held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		       struct c4iw_cq *schp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	int rq_flushed = 0, sq_flushed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	unsigned long flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	/* locking hierarchy: cqs lock first, then qp lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	spin_lock_irqsave(&rchp->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	if (schp != rchp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		spin_lock(&schp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	spin_lock(&qhp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	if (qhp->wq.flushed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		spin_unlock(&qhp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 		if (schp != rchp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 			spin_unlock(&schp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		spin_unlock_irqrestore(&rchp->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	qhp->wq.flushed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	t4_set_wq_in_error(&qhp->wq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	c4iw_flush_hw_cq(rchp, qhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	if (!qhp->srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	if (schp != rchp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		c4iw_flush_hw_cq(schp, qhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	sq_flushed = c4iw_flush_sq(qhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	spin_unlock(&qhp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	if (schp != rchp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		spin_unlock(&schp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	spin_unlock_irqrestore(&rchp->lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	if (schp == rchp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		if ((rq_flushed || sq_flushed) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		    t4_clear_cq_armed(&rchp->cq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 						   rchp->ibcq.cq_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 						   rchp->ibcq.cq_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 			spin_lock_irqsave(&schp->comp_handler_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 			(*schp->ibcq.comp_handler)(&schp->ibcq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 						   schp->ibcq.cq_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
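
/*
 * The "notify at most once per arm" pattern repeated above can be read
 * as the following sketch (hypothetical helper; the real code stays
 * inlined so the rq/sq flushed decisions can be combined when rchp and
 * schp are the same CQ):
 */
#if 0
static void notify_if_armed(struct c4iw_cq *chp)
{
	unsigned long flag;

	if (t4_clear_cq_armed(&chp->cq)) {	/* atomic test-and-clear */
		spin_lock_irqsave(&chp->comp_handler_lock, flag);
		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
	}
}
#endif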
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) static void flush_qp(struct c4iw_qp *qhp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	struct c4iw_cq *rchp, *schp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	unsigned long flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	schp = to_c4iw_cq(qhp->ibqp.send_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	if (qhp->ibqp.uobject) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		/* for user qps, qhp->wq.flushed is protected by qhp->mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 		if (qhp->wq.flushed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		qhp->wq.flushed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		t4_set_wq_in_error(&qhp->wq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		t4_set_cq_in_error(&rchp->cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		if (schp != rchp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 			t4_set_cq_in_error(&schp->cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 			spin_lock_irqsave(&schp->comp_handler_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 			(*schp->ibcq.comp_handler)(&schp->ibcq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 					schp->ibcq.cq_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	__flush_qp(qhp, rchp, schp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) }
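
/*
 * Call-path sketch (simplified): user QPs are only marked in error here
 * because their queues live in user memory and are drained by the
 * userspace provider, while kernel QPs are flushed in place:
 *
 *   c4iw_modify_qp(..., next_state == ERROR)
 *     -> flush_qp(qhp)
 *          -> user QP:   set WQ/CQs in error, kick comp handlers
 *          -> kernel QP: __flush_qp(qhp, rchp, schp)
 */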
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		     struct c4iw_ep *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	struct fw_ri_wr *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	skb = skb_dequeue(&ep->com.ep_skb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	if (WARN_ON(!skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	wqe = __skb_put_zero(skb, sizeof(*wqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	wqe->op_compl = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 		FW_WR_OP_V(FW_RI_INIT_WR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 		FW_WR_COMPL_F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	wqe->flowid_len16 = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 		FW_WR_FLOWID_V(ep->hwtid) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	wqe->cookie = (uintptr_t)ep->com.wr_waitp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	wqe->u.fini.type = FW_RI_TYPE_FINI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 				 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	pr_debug("ret %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) }
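
/*
 * Completion rendezvous sketch (simplified; the reply-side helper name
 * matches iw_cxgb4.h, but the flow shown is an assumption): FW_WR_COMPL_F
 * asks firmware for an explicit reply, the wait object's address rides
 * in the WR cookie, and the CPL reply handler uses that cookie to wake
 * the c4iw_ref_send_wait() sleeper above.
 */
#if 0
wqe->cookie = (uintptr_t)ep->com.wr_waitp;	/* request side */
/* ... firmware executes the FINI, then in the reply handler ... */
c4iw_wake_up_deref(wr_waitp, status);		/* reply side */
#endif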
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	pr_debug("p2p_type = %d\n", p2p_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	memset(&init->u, 0, sizeof(init->u));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	switch (p2p_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 		init->u.write.stag_sink = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 		init->u.write.to_sink = cpu_to_be64(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		init->u.write.len16 = DIV_ROUND_UP(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 			sizeof(init->u.write) + sizeof(struct fw_ri_immd), 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	case FW_RI_INIT_P2PTYPE_READ_REQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		init->u.write.opcode = FW_RI_RDMA_READ_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 		init->u.read.stag_src = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		init->u.read.to_src_lo = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		init->u.read.stag_sink = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		init->u.read.to_sink_lo = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		init->u.read.len16 = DIV_ROUND_UP(sizeof(init->u.read), 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) }
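
/*
 * Union note (an assumption about the t4fw_ri_api.h layout): the
 * READ_REQ case above sets init->u.write.opcode even though it then
 * fills init->u.read.*; that works because both union members begin
 * with the opcode byte, roughly:
 */
#if 0
union {
	struct fw_ri_rdma_write_wr write;	/* opcode at offset 0 */
	struct fw_ri_rdma_read_wr  read;	/* opcode at offset 0 */
} u;
#endif
/*
 * The cpu_to_be32(1)/cpu_to_be64(1) stag and offset values are
 * presumably just non-zero placeholders so the zero-length RTR probe
 * looks like a populated descriptor to firmware.
 */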
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	struct fw_ri_wr *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	ret = alloc_ird(rhp, qhp->attr.max_ird);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 		qhp->attr.max_ird = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	wqe = __skb_put_zero(skb, sizeof(*wqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	wqe->op_compl = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		FW_WR_OP_V(FW_RI_INIT_WR) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 		FW_WR_COMPL_F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	wqe->flowid_len16 = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		FW_WR_FLOWID_V(qhp->ep->hwtid) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	wqe->cookie = (uintptr_t)qhp->ep->com.wr_waitp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	wqe->u.init.type = FW_RI_TYPE_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	wqe->u.init.mpareqbit_p2ptype =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 		FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 		FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	if (qhp->attr.mpa_attr.recv_marker_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	if (qhp->attr.mpa_attr.xmit_marker_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	if (qhp->attr.mpa_attr.crc_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 			    FW_RI_QP_RDMA_WRITE_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 			    FW_RI_QP_BIND_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	if (!qhp->ibqp.uobject)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 				     FW_RI_QP_STAG0_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	if (qhp->srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 		wqe->u.init.rq_eqid = cpu_to_be32(FW_RI_INIT_RQEQID_SRQ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 						  qhp->srq->idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 		wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 						   rhp->rdev.lldi.vr->rq.start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	if (qhp->attr.mpa_attr.initiator)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 				 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	free_ird(rhp, qhp->attr.max_ird);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	pr_debug("ret %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
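
/*
 * Capability grouping sketch (the bit names are real, the grouping into
 * "base" and "kernel_extra" is only an illustration): kernel QPs get
 * fastreg and STAG0 on top of the base set so in-kernel ULPs can use
 * FRMRs, while user QPs (qhp->ibqp.uobject set) are limited to
 * read/write/bind.
 */
#if 0
base = FW_RI_QP_RDMA_READ_ENABLE | FW_RI_QP_RDMA_WRITE_ENABLE |
       FW_RI_QP_BIND_ENABLE;
kernel_extra = FW_RI_QP_FAST_REGISTER_ENABLE | FW_RI_QP_STAG0_ENABLE;
#endif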
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		   enum c4iw_qp_attr_mask mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		   struct c4iw_qp_attributes *attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		   int internal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	struct c4iw_qp_attributes newattr = qhp->attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	int disconnect = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	int terminate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	int abort = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	int free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	struct c4iw_ep *ep = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	mutex_lock(&qhp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	/* Process attr changes if in IDLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 			ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 			newattr.enable_rdma_read = attrs->enable_rdma_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 			newattr.enable_rdma_write = attrs->enable_rdma_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 			newattr.enable_bind = attrs->enable_bind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		if (mask & C4IW_QP_ATTR_MAX_ORD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 			if (attrs->max_ord > c4iw_max_read_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 			newattr.max_ord = attrs->max_ord;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 		if (mask & C4IW_QP_ATTR_MAX_IRD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 			if (attrs->max_ird > cur_max_read_depth(rhp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 			newattr.max_ird = attrs->max_ird;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 		qhp->attr = newattr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	if (mask & C4IW_QP_ATTR_SQ_DB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	if (mask & C4IW_QP_ATTR_RQ_DB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	if (qhp->attr.state == attrs->next_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	switch (qhp->attr.state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	case C4IW_QP_STATE_IDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		switch (attrs->next_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		case C4IW_QP_STATE_RTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 				ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 			qhp->attr.mpa_attr = attrs->mpa_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 			qhp->ep = qhp->attr.llp_stream_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 			set_state(qhp, C4IW_QP_STATE_RTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 			 * Ref the endpoint here and deref when we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 			 * disassociate the endpoint from the QP.  This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 			 * happens in CLOSING->IDLE transition or *->ERROR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 			 * transition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 			c4iw_get_ep(&qhp->ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 			ret = rdma_init(rhp, qhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 				goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		case C4IW_QP_STATE_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 			set_state(qhp, C4IW_QP_STATE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 			flush_qp(qhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	case C4IW_QP_STATE_RTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 		switch (attrs->next_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		case C4IW_QP_STATE_CLOSING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 			t4_set_wq_in_error(&qhp->wq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 			set_state(qhp, C4IW_QP_STATE_CLOSING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 			ep = qhp->ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 			if (!internal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 				abort = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 				disconnect = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 				c4iw_get_ep(&qhp->ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 			ret = rdma_fini(rhp, qhp, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 				goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 		case C4IW_QP_STATE_TERMINATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 			t4_set_wq_in_error(&qhp->wq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 			set_state(qhp, C4IW_QP_STATE_TERMINATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 			qhp->attr.layer_etype = attrs->layer_etype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 			qhp->attr.ecode = attrs->ecode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 			ep = qhp->ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 			if (!internal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 				c4iw_get_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 				terminate = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 				disconnect = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 				terminate = qhp->attr.send_term;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 				ret = rdma_fini(rhp, qhp, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 				if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 					goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		case C4IW_QP_STATE_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 			t4_set_wq_in_error(&qhp->wq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 			set_state(qhp, C4IW_QP_STATE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 			if (!internal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 				abort = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 				disconnect = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 				ep = qhp->ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 				c4iw_get_ep(&qhp->ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	case C4IW_QP_STATE_CLOSING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		 * Allow kernel users to move to ERROR for qp draining.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 		if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 				  C4IW_QP_STATE_ERROR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		switch (attrs->next_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		case C4IW_QP_STATE_IDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 			flush_qp(qhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 			set_state(qhp, C4IW_QP_STATE_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 			qhp->attr.llp_stream_handle = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 			c4iw_put_ep(&qhp->ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 			qhp->ep = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 			wake_up(&qhp->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 		case C4IW_QP_STATE_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	case C4IW_QP_STATE_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		set_state(qhp, C4IW_QP_STATE_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	case C4IW_QP_STATE_TERMINATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 		if (!internal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 		pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		 qhp->wq.sq.qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	/* disassociate the LLP connection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	qhp->attr.llp_stream_handle = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	if (!ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 		ep = qhp->ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	qhp->ep = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	set_state(qhp, C4IW_QP_STATE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	free = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	abort = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	flush_qp(qhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	wake_up(&qhp->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	mutex_unlock(&qhp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	if (terminate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	 * If disconnect is 1, then we need to initiate a disconnect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	 * on the EP.  This can be a normal close (RTS->CLOSING) or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	 * an abnormal close (RTS/CLOSING->ERROR).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	if (disconnect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 							 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 		c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	 * If free is 1, then we've disassociated the EP from the QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	 * and we need to dereference the EP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	if (free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		c4iw_put_ep(&ep->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	pr_debug("exit state %d\n", qhp->attr.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
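
/*
 * State-machine summary for the switch above (sketch; "internal" means
 * driver-initiated transitions with the internal flag set):
 *
 *   IDLE      -> RTS        rdma_init(), ep referenced
 *   IDLE      -> ERROR      flush_qp()
 *   RTS       -> CLOSING    rdma_fini(); graceful disconnect if !internal
 *   RTS       -> TERMINATE  TERMINATE posted and/or rdma_fini()
 *   RTS       -> ERROR      abortive disconnect, err path flushes
 *   CLOSING   -> IDLE       flush_qp(), ep deref, waiters woken
 *   CLOSING   -> ERROR      err path (kernel users draining the QP)
 *   ERROR     -> IDLE       allowed only once both queues are empty
 *   TERMINATE -> ERROR      internal only
 *
 * The terminate/disconnect side effects run after the mutex is dropped.
 */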
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	struct c4iw_dev *rhp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	struct c4iw_qp *qhp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	struct c4iw_ucontext *ucontext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	struct c4iw_qp_attributes attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	qhp = to_c4iw_qp(ib_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	rhp = qhp->rhp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	ucontext = qhp->ucontext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	attrs.next_state = C4IW_QP_STATE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	wait_event(qhp->wait, !qhp->ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	xa_lock_irq(&rhp->qps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	__xa_erase(&rhp->qps, qhp->wq.sq.qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	if (!list_empty(&qhp->db_fc_entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 		list_del_init(&qhp->db_fc_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	xa_unlock_irq(&rhp->qps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	free_ird(rhp, qhp->attr.max_ird);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	c4iw_qp_rem_ref(ib_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	wait_for_completion(&qhp->qp_rel_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	pr_debug("ib_qp %p qpid 0x%x\n", ib_qp, qhp->wq.sq.qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	destroy_qp(&rhp->rdev, &qhp->wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	c4iw_put_wr_wait(qhp->wr_waitp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	kfree(qhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) }
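
/*
 * Teardown rendezvous sketch: the wait_for_completion() above pairs with
 * the final reference drop, so freeing the queues cannot race a late
 * reference holder.  The body below is an assumption about the shape of
 * the real c4iw_qp_rem_ref(), not a copy of it.
 */
#if 0
void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	struct c4iw_qp *qhp = to_c4iw_qp(qp);

	if (refcount_dec_and_test(&qhp->qp_refcnt))
		complete(&qhp->qp_rel_comp);
}
#endif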
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 			     struct ib_udata *udata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	struct c4iw_dev *rhp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	struct c4iw_qp *qhp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	struct c4iw_pd *php;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	struct c4iw_cq *schp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	struct c4iw_cq *rchp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	struct c4iw_create_qp_resp uresp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	unsigned int sqsize, rqsize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 		udata, struct c4iw_ucontext, ibucontext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	pr_debug("ib_pd %p\n", pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	if (attrs->qp_type != IB_QPT_RC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		return ERR_PTR(-EOPNOTSUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	php = to_c4iw_pd(pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	rhp = php->rhp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	if (!schp || !rchp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	if (!attrs->srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 		if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 			return ERR_PTR(-E2BIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 		rqsize = attrs->cap.max_recv_wr + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 		if (rqsize < 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 			rqsize = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 		return ERR_PTR(-E2BIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	sqsize = attrs->cap.max_send_wr + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	if (sqsize < 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 		sqsize = 8;
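	/*
	 * Sizing note (assumption, not from the original comments): the
	 * "+ 1" applied to sqsize and rqsize presumably reserves one
	 * unusable slot so a full ring can be told apart from an empty
	 * one, and 8 is used as the minimum queue depth handed to the
	 * hardware.
	 */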
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	if (!qhp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	if (!qhp->wr_waitp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 		goto err_free_qhp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	qhp->wq.sq.size = sqsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	qhp->wq.sq.memsize =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 		(sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	qhp->wq.sq.flush_cidx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	if (!attrs->srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		qhp->wq.rq.size = rqsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		qhp->wq.rq.memsize =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 			(rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 			sizeof(*qhp->wq.rq.queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	if (ucontext) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 		if (!attrs->srq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 			qhp->wq.rq.memsize =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 				roundup(qhp->wq.rq.memsize, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 			ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 			qhp->wr_waitp, !attrs->srq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 		goto err_free_wr_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	attrs->cap.max_recv_wr = rqsize - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	attrs->cap.max_send_wr = sqsize - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	qhp->rhp = rhp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	qhp->attr.pd = php->pdid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	if (!attrs->srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 		qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	qhp->attr.state = C4IW_QP_STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	qhp->attr.enable_rdma_read = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	qhp->attr.enable_rdma_write = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	qhp->attr.enable_bind = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	qhp->attr.max_ord = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	qhp->attr.max_ird = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	spin_lock_init(&qhp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	mutex_init(&qhp->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	init_waitqueue_head(&qhp->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 	init_completion(&qhp->qp_rel_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	refcount_set(&qhp->qp_refcnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 		goto err_destroy_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	if (udata && ucontext) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 		sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 		if (!sq_key_mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 			goto err_remove_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		if (!attrs->srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 			rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 			if (!rq_key_mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 				ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 				goto err_free_sq_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 		sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		if (!sq_db_key_mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 			goto err_free_rq_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 		if (!attrs->srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 			rq_db_key_mm =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 				kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 			if (!rq_db_key_mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 				ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 				goto err_free_sq_db_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		memset(&uresp, 0, sizeof(uresp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 		if (t4_sq_onchip(&qhp->wq.sq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 			ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 						 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 			if (!ma_sync_key_mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 				ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 				goto err_free_rq_db_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 			uresp.flags = C4IW_QPF_ONCHIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 		if (rhp->rdev.lldi.write_w_imm_support)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 			uresp.flags |= C4IW_QPF_WRITE_W_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 		uresp.qid_mask = rhp->rdev.qpmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		uresp.sqid = qhp->wq.sq.qid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 		uresp.sq_size = qhp->wq.sq.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		uresp.sq_memsize = qhp->wq.sq.memsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 		if (!attrs->srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 			uresp.rqid = qhp->wq.rq.qid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 			uresp.rq_size = qhp->wq.rq.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 			uresp.rq_memsize = qhp->wq.rq.memsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 		spin_lock(&ucontext->mmap_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 		if (ma_sync_key_mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 			uresp.ma_sync_key = ucontext->key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 			ucontext->key += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 		uresp.sq_key = ucontext->key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 		ucontext->key += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		if (!attrs->srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 			uresp.rq_key = ucontext->key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 			ucontext->key += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 		uresp.sq_db_gts_key = ucontext->key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		ucontext->key += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 		if (!attrs->srq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 			uresp.rq_db_gts_key = ucontext->key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 			ucontext->key += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		spin_unlock(&ucontext->mmap_lock);
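		/*
		 * Mapping-key note (assumption): each ucontext->key value
		 * handed out above is a unique page-aligned pseudo mmap
		 * offset; userspace passes it back through mmap() and the
		 * driver's mmap handler matches it against the
		 * c4iw_mm_entry records installed by the insert_mmap()
		 * calls below.
		 */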
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 		ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 			goto err_free_ma_sync_key;
		sq_key_mm->key = uresp.sq_key;
		sq_key_mm->addr = qhp->wq.sq.phys_addr;
		sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, sq_key_mm);
		if (!attrs->srq) {
			rq_key_mm->key = uresp.rq_key;
			rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
			rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
			insert_mmap(ucontext, rq_key_mm);
		}
		sq_db_key_mm->key = uresp.sq_db_gts_key;
		sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
		sq_db_key_mm->len = PAGE_SIZE;
		insert_mmap(ucontext, sq_db_key_mm);
		if (!attrs->srq) {
			rq_db_key_mm->key = uresp.rq_db_gts_key;
			rq_db_key_mm->addr =
				(u64)(unsigned long)qhp->wq.rq.bar2_pa;
			rq_db_key_mm->len = PAGE_SIZE;
			insert_mmap(ucontext, rq_db_key_mm);
		}
		if (ma_sync_key_mm) {
			ma_sync_key_mm->key = uresp.ma_sync_key;
			ma_sync_key_mm->addr =
				(pci_resource_start(rhp->rdev.lldi.pdev, 0) +
				PCIE_MA_SYNC_A) & PAGE_MASK;
			ma_sync_key_mm->len = PAGE_SIZE;
			insert_mmap(ucontext, ma_sync_key_mm);
		}

		qhp->ucontext = ucontext;
	}
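	/*
	 * The qp_err flag (and the srqidx field when an SRQ is attached)
	 * live in the status page that sits just past the last queue entry.
	 */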
	if (!attrs->srq) {
		qhp->wq.qp_errp =
			&qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err;
	} else {
		qhp->wq.qp_errp =
			&qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err;
		qhp->wq.srqidxp =
			&qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx;
	}

	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	if (attrs->srq)
		qhp->srq = to_c4iw_srq(attrs->srq);
	INIT_LIST_HEAD(&qhp->db_fc_entry);
	pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
		 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
		 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
		 qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
	return &qhp->ibqp;
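	/*
	 * Unwind in the reverse order of allocation; the RQ-only entries
	 * exist only when no SRQ was supplied.
	 */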
err_free_ma_sync_key:
	kfree(ma_sync_key_mm);
err_free_rq_db_key:
	if (!attrs->srq)
		kfree(rq_db_key_mm);
err_free_sq_db_key:
	kfree(sq_db_key_mm);
err_free_rq_key:
	if (!attrs->srq)
		kfree(rq_key_mm);
err_free_sq_key:
	kfree(sq_key_mm);
err_remove_handle:
	xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);
err_destroy_qp:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
err_free_wr_wait:
	c4iw_put_wr_wait(qhp->wr_waitp);
err_free_qhp:
	kfree(qhp);
	return ERR_PTR(ret);
}

int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs = {};

	pr_debug("ib_qp %p\n", ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

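	/* Translate the IB verbs attributes into iw_cxgb4's own mask/attrs. */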
	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
			       IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(C4IW_QP_ATTR_ENABLE_RDMA_READ |
			 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
			 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	/*
	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
	 * ringing the queue db when we're in DB_FULL mode.
	 * Only allow this on T4 devices.
	 */
	attrs.sq_db_inc = attr->sq_psn;
	attrs.rq_db_inc = attr->rq_psn;
	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
	if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
	    (mask & (C4IW_QP_ATTR_SQ_DB | C4IW_QP_ATTR_RQ_DB)))
		return -EINVAL;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	pr_debug("ib_dev %p qpn 0x%x\n", dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}

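/* Post an IB_EVENT_SRQ_LIMIT_REACHED async event to the SRQ's consumer. */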
void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq)
{
	struct ib_event event = {};

	event.device = &srq->rhp->ibdev;
	event.element.srq = &srq->ibsrq;
	event.event = IB_EVENT_SRQ_LIMIT_REACHED;
	ib_dispatch_event(&event);
}

int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
		    enum ib_srq_attr_mask srq_attr_mask,
		    struct ib_udata *udata)
{
	struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
	int ret = 0;

	/*
	 * XXX: a zero attribute mask from userspace is (ab)used as a
	 * software trigger for the srq_limit-reached event.
	 */
	if (udata && !srq_attr_mask) {
		c4iw_dispatch_srq_limit_reached_event(srq);
		goto out;
	}

	/* no support for this yet */
	if (srq_attr_mask & IB_SRQ_MAX_WR) {
		ret = -EINVAL;
		goto out;
	}

	if (!udata && (srq_attr_mask & IB_SRQ_LIMIT)) {
		srq->armed = true;
		srq->srq_limit = attr->srq_limit;
	}
out:
	return ret;
}

int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof(*attr));
	memset(init_attr, 0, sizeof(*init_attr));
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	attr->cur_qp_state = to_ib_qp_state(qhp->attr.state);
	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
	init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
	init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
	return 0;
}

static void free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
			   struct c4iw_wr_wait *wr_waitp)
{
	struct c4iw_rdev *rdev = &srq->rhp->rdev;
	struct sk_buff *skb = srq->destroy_skb;
	struct t4_srq *wq = &srq->wq;
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;

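	/*
	 * Build an FW_RI_RES_WR that resets the SRQ in hardware and send it
	 * on the control queue, waiting for the firmware completion before
	 * the host-side resources are released.
	 */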
	wr_len = sizeof(*res_wr) + sizeof(*res);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
	res->u.srq.op = FW_RI_RES_OP_RESET;
	res->u.srq.srqid = cpu_to_be32(srq->idx);
	res->u.srq.eqid = cpu_to_be32(wq->qid);

	c4iw_init_wr_wait(wr_waitp);
	c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);

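	/* The firmware no longer references the queue; free host resources. */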
	dma_free_coherent(&rdev->lldi.pdev->dev,
			  wq->memsize, wq->queue,
			  dma_unmap_addr(wq, mapping));
	c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
	kfree(wq->sw_rq);
	c4iw_put_qpid(rdev, wq->qid, uctx);
}

static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
			   struct c4iw_wr_wait *wr_waitp)
{
	struct c4iw_rdev *rdev = &srq->rhp->rdev;
	int user = (uctx != &rdev->uctx);
	struct t4_srq *wq = &srq->wq;
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	struct sk_buff *skb;
	int wr_len;
	int eqsize;
	int ret = -ENOMEM;

	wq->qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->qid)
		goto err;

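	/*
	 * Kernel-mode SRQs track posted WRs in host software; for user-mode
	 * queues the userspace library keeps that state, so skip the
	 * sw_rq/pending_wrs allocations.
	 */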
	if (!user) {
		wq->sw_rq = kcalloc(wq->size, sizeof(*wq->sw_rq),
				    GFP_KERNEL);
		if (!wq->sw_rq)
			goto err_put_qpid;
		wq->pending_wrs = kcalloc(srq->wq.size,
					  sizeof(*srq->wq.pending_wrs),
					  GFP_KERNEL);
		if (!wq->pending_wrs)
			goto err_free_sw_rq;
	}

	wq->rqt_size = wq->size;
	wq->rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rqt_size);
	if (!wq->rqt_hwaddr)
		goto err_free_pending_wrs;
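	/*
	 * The absolute RQT index is the offset of this allocation from the
	 * start of the adapter's RQ table region, in RQT-entry units.
	 */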
	wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
		T4_RQT_ENTRY_SHIFT;

	wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
				       &wq->dma_addr, GFP_KERNEL);
	if (!wq->queue)
		goto err_free_rqtpool;

	dma_unmap_addr_set(wq, mapping, wq->dma_addr);

	wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS,
				      &wq->bar2_qid,
				      user ? &wq->bar2_pa : NULL);

	/*
	 * User mode must have bar2 access.
	 */
	if (user && !wq->bar2_va) {
		pr_warn(MOD "%s: srqid %u not in BAR2 range.\n",
			pci_name(rdev->lldi.pdev), wq->qid);
		ret = -EINVAL;
		goto err_free_queue;
	}

	/* build fw_ri_res_wr */
	wr_len = sizeof(*res_wr) + sizeof(*res);

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		goto err_free_queue;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
	res->u.srq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->size * T4_RQ_NUM_SLOTS +
		rdev->hw_queue.t4_eq_status_entries;
	res->u.srq.eqid = cpu_to_be32(wq->qid);
	res->u.srq.fetchszm_to_iqid =
						/* no host cidx updates */
		cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
		FW_RI_RES_WR_CPRIO_V(0) |       /* don't keep in chip cache */
		FW_RI_RES_WR_PCIECHN_V(0) |     /* set by uP at ri_init time */
		FW_RI_RES_WR_FETCHRO_V(0));     /* relaxed_ordering */
	res->u.srq.dcaen_to_eqsize =
		cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
		FW_RI_RES_WR_DCACPU_V(0) |
		FW_RI_RES_WR_FBMIN_V(2) |
		FW_RI_RES_WR_FBMAX_V(3) |
		FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
		FW_RI_RES_WR_CIDXFTHRESH_V(0) |
		FW_RI_RES_WR_EQSIZE_V(eqsize));
	res->u.srq.eqaddr = cpu_to_be64(wq->dma_addr);
	res->u.srq.srqid = cpu_to_be32(srq->idx);
	res->u.srq.pdid = cpu_to_be32(srq->pdid);
	res->u.srq.hwsrqsize = cpu_to_be32(wq->rqt_size);
	res->u.srq.hwsrqaddr = cpu_to_be32(wq->rqt_hwaddr -
			rdev->lldi.vr->rq.start);

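	/* Post the WR and block until the firmware acks the SRQ write. */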
	c4iw_init_wr_wait(wr_waitp);

	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->qid, __func__);
	if (ret)
		goto err_free_queue;

	pr_debug("%s srq %u eqid %u pdid %u queue va %p pa 0x%llx bar2_addr %p rqt addr 0x%x size %d\n",
		 __func__, srq->idx, wq->qid, srq->pdid, wq->queue,
		 (u64)virt_to_phys(wq->queue), wq->bar2_va,
		 wq->rqt_hwaddr, wq->rqt_size);

	return 0;
err_free_queue:
	dma_free_coherent(&rdev->lldi.pdev->dev,
			  wq->memsize, wq->queue,
			  dma_unmap_addr(wq, mapping));
err_free_rqtpool:
	c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
err_free_pending_wrs:
	if (!user)
		kfree(wq->pending_wrs);
err_free_sw_rq:
	if (!user)
		kfree(wq->sw_rq);
err_put_qpid:
	c4iw_put_qpid(rdev, wq->qid, uctx);
err:
	return ret;
}

void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16)
{
	u64 *src, *dst;

	src = (u64 *)wqe;
	dst = (u64 *)((u8 *)srq->queue + srq->wq_pidx * T4_EQ_ENTRY_SIZE);
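	/*
	 * len16 counts 16-byte units: copy two u64 words per unit and wrap
	 * back to the start of the queue when the copy runs past the end.
	 */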
	while (len16) {
		*dst++ = *src++;
		if (dst >= (u64 *)&srq->queue[srq->size])
			dst = (u64 *)srq->queue;
		*dst++ = *src++;
		if (dst >= (u64 *)&srq->queue[srq->size])
			dst = (u64 *)srq->queue;
		len16--;
	}
}

int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
		    struct ib_udata *udata)
{
	struct ib_pd *pd = ib_srq->pd;
	struct c4iw_dev *rhp;
	struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
	struct c4iw_pd *php;
	struct c4iw_create_srq_resp uresp;
	struct c4iw_ucontext *ucontext;
	struct c4iw_mm_entry *srq_key_mm, *srq_db_key_mm;
	int rqsize;
	int ret;
	int wr_len;

	pr_debug("%s ib_pd %p\n", __func__, pd);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (!rhp->rdev.lldi.vr->srq.size)
		return -EINVAL;
	if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size)
		return -E2BIG;
	if (attrs->attr.max_sge > T4_MAX_RECV_SGE)
		return -E2BIG;

	/*
	 * SRQ RQT and RQ must be a power of 2 and at least 16 deep.
	 */
	rqsize = attrs->attr.max_wr + 1;
	rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16));

	ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
					     ibucontext);

	srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!srq->wr_waitp)
		return -ENOMEM;

	srq->idx = c4iw_alloc_srq_idx(&rhp->rdev);
	if (srq->idx < 0) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}

	wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
	srq->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!srq->destroy_skb) {
		ret = -ENOMEM;
		goto err_free_srq_idx;
	}

	srq->rhp = rhp;
	srq->pdid = php->pdid;

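	/*
	 * Queue memory covers the WR entries plus the trailing status page;
	 * a user-mapped queue must be rounded up to a whole page.
	 */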
	srq->wq.size = rqsize;
	srq->wq.memsize =
		(rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
		sizeof(*srq->wq.queue);
	if (ucontext)
		srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE);

	ret = alloc_srq_queue(srq, ucontext ? &ucontext->uctx :
			&rhp->rdev.uctx, srq->wr_waitp);
	if (ret)
		goto err_free_skb;
	attrs->attr.max_wr = rqsize - 1;

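	/* Only chips newer than T6 can raise the SRQ limit-reached event. */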
	if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
		srq->flags = T4_SRQ_LIMIT_SUPPORT;

	if (udata) {
		srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL);
		if (!srq_key_mm) {
			ret = -ENOMEM;
			goto err_free_queue;
		}
		srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL);
		if (!srq_db_key_mm) {
			ret = -ENOMEM;
			goto err_free_srq_key_mm;
		}
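		/*
		 * Fill the create response: queue identity and geometry plus
		 * the mmap keys for the queue memory and the BAR2 doorbell
		 * page, handed out under mmap_lock as for QPs above.
		 */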
		memset(&uresp, 0, sizeof(uresp));
		uresp.flags = srq->flags;
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.srqid = srq->wq.qid;
		uresp.srq_size = srq->wq.size;
		uresp.srq_memsize = srq->wq.memsize;
		uresp.rqt_abs_idx = srq->wq.rqt_abs_idx;
		spin_lock(&ucontext->mmap_lock);
		uresp.srq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.srq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (ret)
			goto err_free_srq_db_key_mm;
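		/* Register the key -> address translations for mmap(). */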
		srq_key_mm->key = uresp.srq_key;
		srq_key_mm->addr = virt_to_phys(srq->wq.queue);
		srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize);
		insert_mmap(ucontext, srq_key_mm);
		srq_db_key_mm->key = uresp.srq_db_gts_key;
		srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa;
		srq_db_key_mm->len = PAGE_SIZE;
		insert_mmap(ucontext, srq_db_key_mm);
	}

	pr_debug("%s srq qid %u idx %u size %u memsize %lu num_entries %u\n",
		 __func__, srq->wq.qid, srq->idx, srq->wq.size,
		 (unsigned long)srq->wq.memsize, attrs->attr.max_wr);

	spin_lock_init(&srq->lock);
	return 0;

err_free_srq_db_key_mm:
	kfree(srq_db_key_mm);
err_free_srq_key_mm:
	kfree(srq_key_mm);
err_free_queue:
	free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
		       srq->wr_waitp);
err_free_skb:
	kfree_skb(srq->destroy_skb);
err_free_srq_idx:
	c4iw_free_srq_idx(&rhp->rdev, srq->idx);
err_free_wr_wait:
	c4iw_put_wr_wait(srq->wr_waitp);
	return ret;
}

int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_srq *srq;
	struct c4iw_ucontext *ucontext;

	srq = to_c4iw_srq(ibsrq);
	rhp = srq->rhp;

	pr_debug("%s id %d\n", __func__, srq->wq.qid);
	ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
					     ibucontext);
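	/* Tear down in the reverse order of c4iw_create_srq(). */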
	free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
		       srq->wr_waitp);
	c4iw_free_srq_idx(&rhp->rdev, srq->idx);
	c4iw_put_wr_wait(srq->wr_waitp);
	return 0;
}