Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5/5B/5+ boards. The listing below is drivers/infiniband/hw/cxgb4/ev.c, the asynchronous event handling code of the Chelsio iw_cxgb4 iWARP RDMA driver.

/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/mman.h>
#include <net/sock.h>

#include "iw_cxgb4.h"

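/*
 * Read the TPT entry for @stag from adapter memory and log its decoded
 * fields.  Used when an ingress error CQE reports the offending stag.
 */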
static void print_tpte(struct c4iw_dev *dev, u32 stag)
{
	int ret;
	struct fw_ri_tpte tpte;

	ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag,
			      (__be32 *)&tpte);
	if (ret) {
		dev_err(&dev->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return;
	}
	pr_debug("stag idx 0x%x valid %d key 0x%x state %d pdid %d perm 0x%x ps %d len 0x%llx va 0x%llx\n",
		 stag & 0xffffff00,
		 FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
		 FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
		 FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
		 FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
		 FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
		 FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
		 ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
		 ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
}

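/*
 * Log the decoded header of an error CQE and dump its raw contents.
 */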
static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
{
	__be64 *p = (void *)err_cqe;

	dev_err(&dev->rdev.lldi.pdev->dev,
		"AE qpid %d opcode %d status 0x%x "
		"type %d len 0x%x wrid.hi 0x%x wrid.lo 0x%x\n",
		CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
		CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len),
		CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));

	pr_debug("%016llx %016llx %016llx %016llx - %016llx %016llx %016llx %016llx\n",
		 be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
		 be64_to_cpu(p[3]), be64_to_cpu(p[4]), be64_to_cpu(p[5]),
		 be64_to_cpu(p[6]), be64_to_cpu(p[7]));

	/*
	 * Ingress WRITE and READ_RESP errors provide
	 * the offending stag, so parse and log it.
	 */
	if (RQ_TYPE(err_cqe) && (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE ||
				 CQE_OPCODE(err_cqe) == FW_RI_READ_RESP))
		print_tpte(dev, CQE_WRID_STAG(err_cqe));
}

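/*
 * Report an async error to the consumer: move an RTS QP to TERMINATE,
 * deliver the IB async event, and run the CQ completion handler if the
 * CQ was armed.
 */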
static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
			  struct c4iw_qp *qhp,
			  struct t4_cqe *err_cqe,
			  enum ib_event_type ib_event)
{
	struct ib_event event;
	struct c4iw_qp_attributes attrs;
	unsigned long flag;

	dump_err_cqe(dev, err_cqe);

	if (qhp->attr.state == C4IW_QP_STATE_RTS) {
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
			       &attrs, 0);
	}

	event.event = ib_event;
	event.device = chp->ibcq.device;
	if (ib_event == IB_EVENT_CQ_ERR)
		event.element.cq = &chp->ibcq;
	else
		event.element.qp = &qhp->ibqp;
	if (qhp->ibqp.event_handler)
		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);

	if (t4_clear_cq_armed(&chp->cq)) {
		spin_lock_irqsave(&chp->comp_handler_lock, flag);
		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
	}
}

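/*
 * Dispatch an async error CQE from the hardware: look up the QP and the
 * affected CQ, take references on both, then map the T4 error status to
 * the corresponding IB async event.
 */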
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
{
	struct c4iw_cq *chp;
	struct c4iw_qp *qhp;
	u32 cqid;

	xa_lock_irq(&dev->qps);
	qhp = xa_load(&dev->qps, CQE_QPID(err_cqe));
	if (!qhp) {
		pr_err("BAD AE qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
		       CQE_QPID(err_cqe),
		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
		       CQE_WRID_LOW(err_cqe));
		xa_unlock_irq(&dev->qps);
		goto out;
	}

	if (SQ_TYPE(err_cqe))
		cqid = qhp->attr.scq;
	else
		cqid = qhp->attr.rcq;
	chp = get_chp(dev, cqid);
	if (!chp) {
		pr_err("BAD AE cqid 0x%x qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
		       cqid, CQE_QPID(err_cqe),
		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
		       CQE_WRID_LOW(err_cqe));
		xa_unlock_irq(&dev->qps);
		goto out;
	}

	c4iw_qp_add_ref(&qhp->ibqp);
	atomic_inc(&chp->refcnt);
	xa_unlock_irq(&dev->qps);

	/* Bad incoming write */
	if (RQ_TYPE(err_cqe) &&
	    (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
		goto done;
	}

	switch (CQE_STATUS(err_cqe)) {

	/* Completion Events */
	case T4_ERR_SUCCESS:
		pr_err("AE with status 0!\n");
		break;

	case T4_ERR_STAG:
	case T4_ERR_PDID:
	case T4_ERR_QPID:
	case T4_ERR_ACCESS:
	case T4_ERR_WRAP:
	case T4_ERR_BOUND:
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
		break;

	/* Device Fatal Errors */
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
		break;

	/* QP Fatal Errors */
	case T4_ERR_OUT_OF_RQE:
	case T4_ERR_PBL_ADDR_BOUND:
	case T4_ERR_CRC:
	case T4_ERR_MARKER:
	case T4_ERR_PDU_LEN_ERR:
	case T4_ERR_DDP_VERSION:
	case T4_ERR_RDMA_VERSION:
	case T4_ERR_OPCODE:
	case T4_ERR_DDP_QUEUE_NUM:
	case T4_ERR_MSN:
	case T4_ERR_TBIT:
	case T4_ERR_MO:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_RQE_ADDR_BOUND:
	case T4_ERR_IRD_OVERFLOW:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
		break;

	default:
		pr_err("Unknown T4 status 0x%x QPID 0x%x\n",
		       CQE_STATUS(err_cqe), qhp->wq.sq.qid);
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
		break;
	}
done:
	if (atomic_dec_and_test(&chp->refcnt))
		wake_up(&chp->wait);
	c4iw_qp_rem_ref(&qhp->ibqp);
out:
	return;
}

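/*
 * CQ event handler: look up the CQ for @qid, take a reference, and
 * invoke the consumer's completion handler under comp_handler_lock.
 */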
int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
{
	struct c4iw_cq *chp;
	unsigned long flag;

	xa_lock_irqsave(&dev->cqs, flag);
	chp = xa_load(&dev->cqs, qid);
	if (chp) {
		atomic_inc(&chp->refcnt);
		xa_unlock_irqrestore(&dev->cqs, flag);
		t4_clear_cq_armed(&chp->cq);
		spin_lock_irqsave(&chp->comp_handler_lock, flag);
		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
		if (atomic_dec_and_test(&chp->refcnt))
			wake_up(&chp->wait);
	} else {
		pr_debug("unknown cqid 0x%x\n", qid);
		xa_unlock_irqrestore(&dev->cqs, flag);
	}
	return 0;
}