Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2)  * Copyright(c) 2015 - 2018 Intel Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * This file is provided under a dual BSD/GPLv2 license.  When using or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * redistributing this file, you may do so under either license.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * GPL LICENSE SUMMARY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * This program is free software; you can redistribute it and/or modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * it under the terms of version 2 of the GNU General Public License as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * published by the Free Software Foundation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * This program is distributed in the hope that it will be useful, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  * WITHOUT ANY WARRANTY; without even the implied warranty of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  * General Public License for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  * BSD LICENSE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20)  * Redistribution and use in source and binary forms, with or without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21)  * modification, are permitted provided that the following conditions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  * are met:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  *  - Redistributions of source code must retain the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25)  *    notice, this list of conditions and the following disclaimer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26)  *  - Redistributions in binary form must reproduce the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  *    notice, this list of conditions and the following disclaimer in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  *    the documentation and/or other materials provided with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  *    distribution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)  *  - Neither the name of Intel Corporation nor the names of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  *    contributors may be used to endorse or promote products derived
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32)  *    from this software without specific prior written permission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34)  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35)  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36)  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37)  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38)  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39)  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40)  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41)  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42)  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43)  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44)  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) #include "hfi.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) #include "mad.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) #include "qp.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) #include "verbs_txreq.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) #include "trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	return (gid->global.interface_id == id &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 		(gid->global.subnet_prefix == gid_prefix ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 
/*
 * hfi1_ruc_check_hdr - validate an incoming packet's header against QP state
 * @ibp: the IB port the packet arrived on
 * @packet: the received packet, with LIDs, SL, pkey, GRH etc. already parsed
 *
 * Checks the GRH (presence and both GIDs), the partition key, and the
 * SLID/port against either the QP's alternate path (when the QP is armed
 * for migration and the packet carries the migration indication) or its
 * primary path.  A successful alternate-path match triggers path
 * migration; a primary-path match while in REARM re-arms the QP.
 *
 * Returns 1 if the packet should be dropped, 0 if it is acceptable.
 *
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the hfi1_migrate_qp() call.
 */
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
{
	__be64 guid;
	unsigned long flags;
	struct rvt_qp *qp = packet->qp;
	u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	u32 dlid = packet->dlid;
	u32 slid = packet->slid;
	u32 sl = packet->sl;
	bool migrated = packet->migrated;
	u16 pkey = packet->pkey;

	if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
		/* Validate against the alternate path. */
		if (!packet->grh) {
			/*
			 * A GRH is required when the alternate path has one,
			 * except for bypass (16B) packets.
			 */
			if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			     IB_AH_GRH) &&
			    (packet->etype != RHF_RCV_TYPE_BYPASS))
				return 1;
		} else {
			const struct ib_global_route *grh;

			/* A GRH arrived but the alternate path has none. */
			if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			      IB_AH_GRH))
				return 1;
			grh = rdma_ah_read_grh(&qp->alt_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			/* Packet DGID must be our GID for this sgid_index. */
			if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
				    guid))
				return 1;
			/* Packet SGID must match the path's destination GID. */
			if (!gid_ok(
				&packet->grh->sgid,
				grh->dgid.global.subnet_prefix,
				grh->dgid.global.interface_id))
				return 1;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
					    sc5, slid))) {
			hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
				      slid, dlid);
			return 1;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||
		    ppd_from_ibp(ibp)->port !=
			rdma_ah_get_port_num(&qp->alt_ah_attr))
			return 1;
		/* All alternate-path checks passed: perform the migration. */
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		/* Validate against the primary path. */
		if (!packet->grh) {
			if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			     IB_AH_GRH) &&
			    (packet->etype != RHF_RCV_TYPE_BYPASS))
				return 1;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
						   IB_AH_GRH))
				return 1;
			grh = rdma_ah_read_grh(&qp->remote_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			/* Packet DGID must be our GID for this sgid_index. */
			if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
				    guid))
				return 1;
			/* Packet SGID must match the path's destination GID. */
			if (!gid_ok(
			     &packet->grh->sgid,
			     grh->dgid.global.subnet_prefix,
			     grh->dgid.global.interface_id))
				return 1;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
					    sc5, slid))) {
			hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
				      slid, dlid);
			return 1;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if ((slid != rdma_ah_get_dlid(&qp->remote_ah_attr)) ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			return 1;
		/* Primary path matched again: re-arm a REARMed QP. */
		if (qp->s_mig_state == IB_MIG_REARM && !migrated)
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)  * hfi1_make_grh - construct a GRH header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)  * @ibp: a pointer to the IB port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)  * @hdr: a pointer to the GRH header being constructed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)  * @grh: the global route address to send to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)  * @hwords: size of header after grh being sent in dwords
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)  * @nwords: the number of 32 bit words of data being sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)  * Return the size of the header in 32 bit words.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 		  const struct ib_global_route *grh, u32 hwords, u32 nwords)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 	hdr->version_tclass_flow =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	hdr->paylen = cpu_to_be16((hwords + nwords) << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	hdr->next_hdr = IB_GRH_NEXT_HDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 	hdr->hop_limit = grh->hop_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	/* The SGID is 32-bit aligned. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	hdr->sgid.global.interface_id =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 		grh->sgid_index < HFI1_GUIDS_PER_PORT ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 		get_sguid(ibp, grh->sgid_index) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 		get_sguid(ibp, HFI1_PORT_GUID_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 	hdr->dgid = grh->dgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	/* GRH header size in 32-bit words. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 	return sizeof(struct ib_grh) / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 
/* Dword offset of bth[2] (the PSN word) within the SDMA header. */
#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, \
			      hdr.ibh.u.oth.bth[2]) / 4)

/**
 * build_ahg - create ahg in s_ahg
 * @qp: a pointer to QP
 * @npsn: the next PSN for the request/response
 *
 * This routine handles the AHG by allocating an ahg entry and causing the
 * copy of the first middle.
 *
 * Subsequent middles use the copied entry, editing the
 * PSN with 1 or 2 edits.
 */
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
	struct hfi1_qp_priv *priv = qp->priv;

	/* A pending clear request invalidates any existing AHG state. */
	if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR))
		clear_ahg(qp);
	if (!(qp->s_flags & HFI1_S_AHG_VALID)) {
		/* first middle that needs copy  */
		if (qp->s_ahgidx < 0)
			qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
		if (qp->s_ahgidx >= 0) {
			/* Remember the PSN the copied header carries. */
			qp->s_ahgpsn = npsn;
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
			/* save to protect a change in another thread */
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			qp->s_flags |= HFI1_S_AHG_VALID;
		}
		/* NOTE: if allocation failed, fall through without AHG. */
	} else {
		/* subsequent middle after valid */
		if (qp->s_ahgidx >= 0) {
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			priv->s_ahg->ahgcount++;
			/* Edit 1: patch the low 16 bits of the PSN. */
			priv->s_ahg->ahgdesc[0] =
				sdma_build_ahg_descriptor(
					(__force u16)cpu_to_be16((u16)npsn),
					BTH2_OFFSET,
					16,
					16);
			/* Edit 2: only when the high 16 PSN bits changed. */
			if ((npsn & 0xffff0000) !=
					(qp->s_ahgpsn & 0xffff0000)) {
				priv->s_ahg->ahgcount++;
				priv->s_ahg->ahgdesc[1] =
					sdma_build_ahg_descriptor(
						(__force u16)cpu_to_be16(
							(u16)(npsn >> 16)),
						BTH2_OFFSET,
						0,
						16);
			}
		}
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 				     struct ib_other_headers *ohdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 				     u32 bth0, u32 bth1, u32 bth2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	ohdr->bth[0] = cpu_to_be32(bth0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 	ohdr->bth[1] = cpu_to_be32(bth1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 	ohdr->bth[2] = cpu_to_be32(bth2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 
/**
 * hfi1_make_ruc_header_16B - build a 16B header
 * @qp: the queue pair
 * @ohdr: a pointer to the destination header memory
 * @bth0: bth0 passed in from the RC/UC builder
 * @bth1: bth1 passed in from the RC/UC builder
 * @bth2: bth2 passed in from the RC/UC builder
 * @middle: non zero implies indicates ahg "could" be used
 * @ps: the current packet state
 *
 * This routine may disarm ahg under these situations:
 * - packet needs a GRH
 * - BECN needed
 * - migration state not IB_MIG_MIGRATED
 */
static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
					    struct ib_other_headers *ohdr,
					    u32 bth0, u32 bth1, u32 bth2,
					    int middle,
					    struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp = ps->ibp;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 slid;
	u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	u8 l4 = OPA_16B_L4_IB_LOCAL;
	/* Padding needed to round the payload up to a dword boundary. */
	u8 extra_bytes = hfi1_get_16b_padding(
				(ps->s_txreq->hdr_dwords << 2),
				ps->s_txreq->s_cur_size);
	/* Payload dwords including padding, tail (LT) byte and CRC. */
	u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
				 extra_bytes + SIZE_OF_LT) >> 2);
	bool becn = false;

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
	    hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
		struct ib_grh *grh;
		struct ib_global_route *grd =
			rdma_ah_retrieve_grh(&qp->remote_ah_attr);
		/*
		 * Ensure OPA GIDs are transformed to IB gids
		 * before creating the GRH.
		 */
		if (grd->sgid_index == OPA_GID_INDEX)
			grd->sgid_index = 0;
		grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
		l4 = OPA_16B_L4_IB_GLOBAL;
		ps->s_txreq->hdr_dwords +=
			hfi1_make_grh(ibp, grh, grd,
				      ps->s_txreq->hdr_dwords - LRH_16B_DWORDS,
				      nwords);
		/* GRH present: AHG cannot be used for this packet. */
		middle = 0;
	}

	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth1 |= OPA_BTH_MIG_REQ;
	else
		middle = 0;

	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
		/* we recently received a FECN, so return a BECN */
		becn = true;
		middle = 0;
	}
	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~HFI1_S_AHG_VALID;

	/* Pkey in the low bits, pad count in bits 20-21 of bth0. */
	bth0 |= pkey;
	bth0 |= extra_bytes << 20;
	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);

	/* No LID assigned yet: send from the permissive LID. */
	if (!ppd->lid)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	else
		slid = ppd->lid |
			(rdma_ah_get_path_bits(&qp->remote_ah_attr) &
			((1 << ppd->lmc) - 1));

	hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
			  slid,
			  opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
				      16B),
			  (ps->s_txreq->hdr_dwords + nwords) >> 1,
			  pkey, becn, 0, l4, priv->s_sc);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 
/**
 * hfi1_make_ruc_header_9B - build a 9B header
 * @qp: the queue pair
 * @ohdr: a pointer to the destination header memory
 * @bth0: bth0 passed in from the RC/UC builder
 * @bth1: bth1 passed in from the RC/UC builder
 * @bth2: bth2 passed in from the RC/UC builder
 * @middle: non zero implies indicates ahg "could" be used
 * @ps: the current packet state
 *
 * This routine may disarm ahg under these situations:
 * - packet needs a GRH
 * - BECN needed
 * - migration state not IB_MIG_MIGRATED
 */
static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
					   struct ib_other_headers *ohdr,
					   u32 bth0, u32 bth1, u32 bth2,
					   int middle,
					   struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp = ps->ibp;
	u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	u16 lrh0 = HFI1_LRH_BTH;
	/* Pad count to round the payload up to a dword boundary. */
	u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
	/* Payload dwords including padding and CRC. */
	u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
					 extra_bytes) >> 2);

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
		struct ib_grh *grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;

		lrh0 = HFI1_LRH_GRH;
		ps->s_txreq->hdr_dwords +=
			hfi1_make_grh(ibp, grh,
				      rdma_ah_read_grh(&qp->remote_ah_attr),
				      ps->s_txreq->hdr_dwords - LRH_9B_DWORDS,
				      nwords);
		/* GRH present: AHG cannot be used for this packet. */
		middle = 0;
	}
	/* Fold the SC (bits 12-15) and SL (bits 4-7) into lrh0. */
	lrh0 |= (priv->s_sc & 0xf) << 12 |
		(rdma_ah_get_sl(&qp->remote_ah_attr) & 0xf) << 4;

	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	else
		middle = 0;

	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
		/* we recently received a FECN, so return a BECN */
		bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
		middle = 0;
	}
	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~HFI1_S_AHG_VALID;

	/* Pkey in the low bits, pad count in bits 20-21 of bth0. */
	bth0 |= pkey;
	bth0 |= extra_bytes << 20;
	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
	hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
			 lrh0,
			 ps->s_txreq->hdr_dwords + nwords,
			 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
			 ppd_from_ibp(ibp)->lid |
				rdma_ah_get_path_bits(&qp->remote_ah_attr));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 
/* Signature shared by the per-format RUC header builders. */
typedef void (*hfi1_make_ruc_hdr)(struct rvt_qp *qp,
				  struct ib_other_headers *ohdr,
				  u32 bth0, u32 bth1, u32 bth2, int middle,
				  struct hfi1_pkt_state *ps);

/* We support only two types - 9B and 16B for now */
static const hfi1_make_ruc_hdr hfi1_ruc_header_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &hfi1_make_ruc_header_9B,
	[HFI1_PKT_TYPE_16B] = &hfi1_make_ruc_header_16B
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 			  u32 bth0, u32 bth1, u32 bth2, int middle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 			  struct hfi1_pkt_state *ps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 	struct hfi1_qp_priv *priv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 	 * reset s_ahg/AHG fields
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 	 * This insures that the ahgentry/ahgcount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 	 * are at a non-AHG default to protect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 	 * build_verbs_tx_desc() from using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 	 * an include ahgidx.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 	 * build_ahg() will modify as appropriate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 	 * to use the AHG feature.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 	priv->s_ahg->tx_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 	priv->s_ahg->ahgcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 	priv->s_ahg->ahgidx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 	/* Make the appropriate header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 	hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth1, bth2, middle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 					    ps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) /* when sending, force a reschedule every one of these periods */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) #define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)  * hfi1_schedule_send_yield - test for a yield required for QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)  * send engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)  * @timeout: Final time for timeout slice for jiffies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)  * @qp: a pointer to QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)  * @ps: a pointer to a structure with commonly lookup values for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461)  *      the the send engine progress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)  * @tid - true if it is the tid leg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)  * This routine checks if the time slice for the QP has expired
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)  * for RC QPs, if so an additional work entry is queued. At this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)  * point, other QPs have an opportunity to be scheduled. It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)  * returns true if a yield is required, otherwise, false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)  * is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) bool hfi1_schedule_send_yield(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 			      bool tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 	ps->pkts_sent = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 	if (unlikely(time_after(jiffies, ps->timeout))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 		if (!ps->in_thread ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 		    workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 			spin_lock_irqsave(&qp->s_lock, ps->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 			if (!tid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 				qp->s_flags &= ~RVT_S_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 				hfi1_schedule_send(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 				struct hfi1_qp_priv *priv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 				if (priv->s_flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 				    HFI1_S_TID_BUSY_SET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 					qp->s_flags &= ~RVT_S_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 					priv->s_flags &=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 						~(HFI1_S_TID_BUSY_SET |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 						  RVT_S_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 				} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 					priv->s_flags &= ~RVT_S_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 				hfi1_schedule_tid_send(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 			spin_unlock_irqrestore(&qp->s_lock, ps->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 			this_cpu_inc(*ps->ppd->dd->send_schedule);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 			trace_hfi1_rc_expired_time_slice(qp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 		this_cpu_inc(*ps->ppd->dd->send_schedule);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 		ps->timeout = jiffies + ps->timeout_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 	trace_hfi1_rc_expired_time_slice(qp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) void hfi1_do_send_from_rvt(struct rvt_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	hfi1_do_send(qp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) void _hfi1_do_send(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 	struct iowait_work *w = container_of(work, struct iowait_work, iowork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 	struct rvt_qp *qp = iowait_to_qp(w->iow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 	hfi1_do_send(qp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)  * hfi1_do_send - perform a send on a QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)  * @qp: a pointer to the QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)  * @in_thread: true if in a workqueue thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)  * Process entries in the send work queue until credit or queue is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)  * exhausted.  Only allow one CPU to send a packet per QP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)  * Otherwise, two threads could send packets out of order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	struct hfi1_pkt_state ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	struct hfi1_qp_priv *priv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 	int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 	ps.dev = to_idev(qp->ibqp.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 	ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 	ps.ppd = ppd_from_ibp(ps.ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 	ps.in_thread = in_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 	ps.wait = iowait_get_ib_work(&priv->s_iowait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 	trace_hfi1_rc_do_send(qp, in_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 	switch (qp->ibqp.qp_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 	case IB_QPT_RC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 		if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 				   ~((1 << ps.ppd->lmc) - 1)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 				  ps.ppd->lid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 			rvt_ruc_loopback(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 		make_req = hfi1_make_rc_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 		ps.timeout_int = qp->timeout_jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	case IB_QPT_UC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 		if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 				   ~((1 << ps.ppd->lmc) - 1)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 				  ps.ppd->lid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 			rvt_ruc_loopback(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 		make_req = hfi1_make_uc_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 		ps.timeout_int = SEND_RESCHED_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 		make_req = hfi1_make_ud_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 		ps.timeout_int = SEND_RESCHED_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 	spin_lock_irqsave(&qp->s_lock, ps.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 	/* Return if we are already busy processing a work request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 	if (!hfi1_send_ok(qp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 		if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 			iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 		spin_unlock_irqrestore(&qp->s_lock, ps.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 	qp->s_flags |= RVT_S_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	ps.timeout_int = ps.timeout_int / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 	ps.timeout = jiffies + ps.timeout_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 	ps.cpu = priv->s_sde ? priv->s_sde->cpu :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 			cpumask_first(cpumask_of_node(ps.ppd->dd->node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 	ps.pkts_sent = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 	/* insure a pre-built packet is handled  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 	ps.s_txreq = get_waiting_verbs_txreq(ps.wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 		/* Check for a constructed packet to be sent. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 		if (ps.s_txreq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 			if (priv->s_flags & HFI1_S_TID_BUSY_SET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 				qp->s_flags |= RVT_S_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 			spin_unlock_irqrestore(&qp->s_lock, ps.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 			 * If the packet cannot be sent now, return and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 			 * the send engine will be woken up later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 			if (hfi1_verbs_send(qp, &ps))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 			/* allow other tasks to run */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 			if (hfi1_schedule_send_yield(qp, &ps, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 			spin_lock_irqsave(&qp->s_lock, ps.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 	} while (make_req(qp, &ps));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 	spin_unlock_irqrestore(&qp->s_lock, ps.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) }