/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/io.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "hfi.h"
#include "qp.h"
#include "rc.h"
#include "verbs_txreq.h"
#include "trace.h"

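/**
 * find_prev_entry - find the ACK queue entry containing a given PSN
 * @qp: the QP
 * @psn: the packet sequence number to look up
 * @prev: if non-NULL, returns the index of the entry found
 * @prev_ack: if non-NULL, returns the index of the entry that follows it
 *	      in the queue
 * @scheduled: if non-NULL, cleared when the walk passes s_tail_ack_queue
 *	       or when @psn falls within the tail entry's PSN range
 *
 * Walk the s_ack_queue backward from r_head_ack_queue and return the
 * first entry whose starting PSN is less than or equal to @psn, or NULL
 * if the walk wraps back to the head or reaches an unused entry.
 *
 * The caller must hold the QP s_lock.
 */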
struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
				      u8 *prev_ack, bool *scheduled)
	__must_hold(&qp->s_lock)
{
	struct rvt_ack_entry *e = NULL;
	u8 i, p;
	bool s = true;

	for (i = qp->r_head_ack_queue; ; i = p) {
		if (i == qp->s_tail_ack_queue)
			s = false;
		if (i)
			p = i - 1;
		else
			p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
		if (p == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[p];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (cmp_psn(psn, e->psn) >= 0) {
			if (p == qp->s_tail_ack_queue &&
			    cmp_psn(psn, e->lpsn) <= 0)
				s = false;
			break;
		}
	}
	if (prev)
		*prev = p;
	if (prev_ack)
		*prev_ack = i;
	if (scheduled)
		*scheduled = s;
	return e;
}

/**
 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @ps: the xmit packet state
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */
static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
		       struct ib_other_headers *ohdr,
		       struct hfi1_pkt_state *ps)
{
	struct rvt_ack_entry *e;
	u32 hwords, hdrlen;
	u32 len = 0;
	u32 bth0 = 0, bth2 = 0;
	u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
	int middle = 0;
	u32 pmtu = qp->pmtu;
	struct hfi1_qp_priv *qpriv = qp->priv;
	bool last_pkt;
	u32 delta;
	u8 next = qp->s_tail_ack_queue;
	struct tid_rdma_request *req;

	trace_hfi1_rsp_make_rc_ack(qp, 0);
	lockdep_assert_held(&qp->s_lock);
	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	if (qpriv->hdr_type == HFI1_PKT_TYPE_9B)
		/* header size in 32-bit words LRH+BTH = (8+12)/4. */
		hwords = 5;
	else
		/* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
		hwords = 7;

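	/*
	 * Dispatch on the response currently in progress: retire the
	 * previous response and start the next queued entry, continue a
	 * multi-packet (RDMA READ or TID RDMA) response, or fall back to
	 * sending a bare ACK/NAK.
	 */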
	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		release_rdma_sge_mr(e);
		fallthrough;
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++next > rvt_size_atomic(&dev->rdi))
			next = 0;
		/*
		 * Only advance the s_acked_ack_queue pointer if there
		 * have been no TID RDMA requests.
		 */
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode != TID_OP(WRITE_REQ) &&
		    qp->s_acked_ack_queue == qp->s_tail_ack_queue)
			qp->s_acked_ack_queue = next;
		qp->s_tail_ack_queue = next;
		trace_hfi1_rsp_make_rc_ack(qp, e->psn);
		fallthrough;
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		/* Check for tid write fence */
		if ((qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK) ||
		    hfi1_tid_rdma_ack_interlock(qp, e)) {
			iowait_set_flag(&qpriv->s_iowait, IOWAIT_PENDING_IB);
			goto bail;
		}
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If an RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses
			 * until the requester re-sends the request.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				if (qp->s_acked_ack_queue ==
				    qp->s_tail_ack_queue)
					qp->s_acked_ack_queue =
						qp->r_head_ack_queue;
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			ps->s_txreq->mr = e->rdma_sge.mr;
			if (ps->s_txreq->mr)
				rvt_get_mr(ps->s_txreq->mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			ps->s_txreq->ss = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = mask_psn(qp->s_ack_rdma_psn++);
		} else if (e->opcode == TID_OP(WRITE_REQ)) {
			/*
			 * If a TID RDMA WRITE RESP is being resent, we have to
			 * wait for the actual request. All requests that are to
			 * be resent will have their state set to
			 * TID_REQUEST_RESEND. When the new request arrives, the
			 * state will be changed to TID_REQUEST_RESEND_ACTIVE.
			 */
			req = ack_to_tid_req(e);
			if (req->state == TID_REQUEST_RESEND ||
			    req->state == TID_REQUEST_INIT_RESEND)
				goto bail;
			qp->s_ack_state = TID_OP(WRITE_RESP);
			qp->s_ack_rdma_psn = mask_psn(e->psn + req->cur_seg);
			goto write_resp;
		} else if (e->opcode == TID_OP(READ_REQ)) {
			/*
			 * If a TID RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses
			 * until the requester re-sends the request.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				if (qp->s_acked_ack_queue ==
				    qp->s_tail_ack_queue)
					qp->s_acked_ack_queue =
						qp->r_head_ack_queue;
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			ps->s_txreq->mr = e->rdma_sge.mr;
			if (ps->s_txreq->mr)
				rvt_get_mr(ps->s_txreq->mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			qp->s_ack_state = TID_OP(READ_RESP);
			goto read_resp;
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			ps->s_txreq->ss = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = rvt_compute_aeth(qp);
			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = mask_psn(e->psn);
			e->sent = 1;
		}
		trace_hfi1_tid_write_rsp_make_rc_ack(qp);
		bth0 = qp->s_ack_state << 24;
		break;

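	/*
	 * Continue an in-progress RDMA READ response: emit pmtu-sized
	 * middle packets until the remaining length fits in one packet,
	 * then append the AETH and finish via RDMA_READ_RESPONSE_LAST.
	 */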
	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		fallthrough;
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		ps->s_txreq->ss = &qp->s_ack_rdma_sge;
		ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
		if (ps->s_txreq->mr)
			rvt_get_mr(ps->s_txreq->mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
		} else {
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = mask_psn(qp->s_ack_rdma_psn++);
		break;

	case TID_OP(WRITE_RESP):
write_resp:
		/*
		 * 1. Check if RVT_S_ACK_PENDING is set. If yes,
		 *    goto normal.
		 * 2. Attempt to allocate TID resources.
		 * 3. Remove the RVT_S_RESP_PENDING flag from s_flags.
		 * 4. If resources are not available:
		 *    4.1 Set RVT_S_WAIT_TID_SPACE
		 *    4.2 Queue QP on RCD TID queue
		 *    4.3 Put QP on iowait list.
		 *    4.4 Build IB RNR NAK with appropriate timeout value
		 *    4.5 Return an indication that progress was made.
		 * 5. If resources are available:
		 *    5.1 Program HW flow CSRs
		 *    5.2 Build TID RDMA WRITE RESP packet
		 *    5.3 If more resources are needed, do 2.1 - 2.3.
		 *    5.4 Wake up next QP on RCD TID queue.
		 *    5.5 Return an indication that progress was made.
		 */

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		req = ack_to_tid_req(e);

		/*
		 * Send scheduled RNR NAKs. RNR NAKs need to be sent at
		 * segment boundaries, not at request boundaries. Don't
		 * change s_ack_state because we are still in the middle
		 * of a request.
		 */
		if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND &&
		    qp->s_tail_ack_queue == qpriv->r_tid_alloc &&
		    req->cur_seg == req->alloc_seg) {
			qpriv->rnr_nak_state = TID_RNR_NAK_SENT;
			goto normal_no_state;
		}

		bth2 = mask_psn(qp->s_ack_rdma_psn);
		hdrlen = hfi1_build_tid_rdma_write_resp(qp, e, ohdr, &bth1,
							bth2, &len,
							&ps->s_txreq->ss);
		if (!hdrlen)
			return 0;

		hwords += hdrlen;
		bth0 = qp->s_ack_state << 24;
		qp->s_ack_rdma_psn++;
		trace_hfi1_tid_req_make_rc_ack_write(qp, 0, e->opcode, e->psn,
						     e->lpsn, req);
		if (req->cur_seg != req->total_segs)
			break;

		e->sent = 1;
		/* Do not free e->rdma_sge until all data are received */
		qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
		break;

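	/*
	 * Continue a TID RDMA READ response. A zero return from
	 * hfi1_build_tid_rdma_read_resp() means no packet could be
	 * built and the QP is moved to the error state below.
	 */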
	case TID_OP(READ_RESP):
read_resp:
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		ps->s_txreq->ss = &qp->s_ack_rdma_sge;
		delta = hfi1_build_tid_rdma_read_resp(qp, e, ohdr, &bth0,
						      &bth1, &bth2, &len,
						      &last_pkt);
		if (delta == 0)
			goto error_qp;
		hwords += delta;
		if (last_pkt) {
			e->sent = 1;
			/*
			 * Increment qp->s_tail_ack_queue through s_ack_state
			 * transition.
			 */
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
		}
		break;
	case TID_OP(READ_REQ):
		goto bail;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
normal_no_state:
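		/*
		 * Encode a NAK AETH if a NAK is pending, otherwise a
		 * normal ACK AETH with the current credit state.
		 */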
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					    (qp->s_nak_state <<
					     IB_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = rvt_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = mask_psn(qp->s_ack_psn);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
		ps->s_txreq->ss = NULL;
	}
	qp->s_rdma_ack_cnt++;
	ps->s_txreq->sde = qpriv->s_sde;
	ps->s_txreq->s_cur_size = len;
	ps->s_txreq->hdr_dwords = hwords;
	hfi1_make_ruc_header(qp, ohdr, bth0, bth1, bth2, middle, ps);
	return 1;
error_qp:
	spin_unlock_irqrestore(&qp->s_lock, ps->flags);
	spin_lock_irqsave(&qp->r_lock, ps->flags);
	spin_lock(&qp->s_lock);
	rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, ps->flags);
	spin_lock_irqsave(&qp->s_lock, ps->flags);
bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	/*
	 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
	 * RVT_S_RESP_PENDING
	 */
	smp_wmb();
	qp->s_flags &= ~(RVT_S_RESP_PENDING
				| RVT_S_ACK_PENDING
				| HFI1_S_AHG_VALID);
	return 0;
}

/**
 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 * @ps: the current packet state
 *
 * Assumes s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss = NULL;
	struct rvt_swqe *wqe;
	struct hfi1_swqe_priv *wpriv;
	struct tid_rdma_request *req = NULL;
	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	u32 hwords = 5;
	u32 len = 0;
	u32 bth0 = 0, bth2 = 0;
	u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
	u32 pmtu = qp->pmtu;
	char newreq;
	int middle = 0;
	int delta;
	struct tid_rdma_flow *flow = NULL;
	struct tid_rdma_params *remote;

	trace_hfi1_sender_make_rc_req(qp);
	lockdep_assert_held(&qp->s_lock);
	ps->s_txreq = get_txreq(ps->dev, qp);
	if (!ps->s_txreq)
		goto bail_no_tx;

	if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
		/* header size in 32-bit words LRH+BTH = (8+12)/4. */
		hwords = 5;
		if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
			ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
		else
			ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
	} else {
		/* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
		hwords = 7;
		if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
		    (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
			ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
		else
			ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
	}

	/* Sending responses has higher priority than sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    make_rc_ack(dev, qp, ohdr, ps))
		return 1;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
					 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done_free_tx;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK | HFI1_S_WAIT_HALT))
		goto bail;

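	/*
	 * If the next PSN to build is not beyond the range of PSNs
	 * currently being transmitted (s_sending_psn..s_sending_hpsn),
	 * wait for those packets to clear the send engine before
	 * building more; otherwise reset the sending window at s_psn.
	 */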
	if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
check_s_state:
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == READ_ONCE(qp->s_head)) {
				clear_ahg(qp);
				goto bail;
			}
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 * However, there is no need to guard against
			 * TID RDMA READ after TID RDMA READ.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic &&
			    (wqe->wr.opcode != IB_WR_TID_RDMA_READ ||
			     priv->pending_tid_r_segs < qp->s_num_rd_atomic)) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			/*
			 * Local operations are processed immediately
			 * after all prior requests have completed
			 */
			if (wqe->wr.opcode == IB_WR_REG_MR ||
			    wqe->wr.opcode == IB_WR_LOCAL_INV) {
				int local_ops = 0;
				int err = 0;

				if (qp->s_last != qp->s_cur)
					goto bail;
				if (++qp->s_cur == qp->s_size)
					qp->s_cur = 0;
				if (++qp->s_tail == qp->s_size)
					qp->s_tail = 0;
				if (!(wqe->wr.send_flags &
				      RVT_SEND_COMPLETION_ONLY)) {
					err = rvt_invalidate_rkey(
						qp,
						wqe->wr.ex.invalidate_rkey);
					local_ops = 1;
				}
				rvt_send_complete(qp, wqe,
						  err ? IB_WC_LOC_PROT_ERR
						      : IB_WC_SUCCESS);
				if (local_ops)
					atomic_dec(&qp->local_ops_pending);
				goto done_free_tx;
			}

			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = mask_psn(qp->s_psn);

		/*
		 * Interlock between various IB requests and TID RDMA
		 * if necessary.
		 */
		if ((priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) ||
		    hfi1_tid_rdma_wqe_interlock(qp, wqe))
			goto bail;

		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
		case IB_WR_SEND_WITH_INV:
			/* If no credit, return. */
			if (!rvt_rc_credit_avail(qp, wqe))
				goto bail;
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND) {
				qp->s_state = OP(SEND_ONLY);
			} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			} else {
				qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
				/* Invalidate rkey comes after the BTH */
				ohdr->u.ieth = cpu_to_be32(
					wqe->wr.ex.invalidate_rkey);
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

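		/*
		 * A plain RDMA WRITE does not consume a receive work request
		 * at the responder, so it skips the credit check that RDMA
		 * WRITE WITH IMMEDIATE (which does consume one) must pass.
		 */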
		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			goto no_flow_control;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!rvt_rc_credit_avail(qp, wqe))
				goto bail;
no_flow_control:
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
				qp->s_state = OP(RDMA_WRITE_ONLY);
			} else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_TID_RDMA_WRITE:
			if (newreq) {
				/*
				 * Limit the number of TID RDMA WRITE requests.
				 */
				if (atomic_read(&priv->n_tid_requests) >=
				    HFI1_TID_RDMA_WRITE_CNT)
					goto bail;

				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}

			hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr,
								&bth1, &bth2,
								&len);
			ss = NULL;
			if (priv->s_tid_cur == HFI1_QP_WQE_INVALID) {
				priv->s_tid_cur = qp->s_cur;
				if (priv->s_tid_tail == HFI1_QP_WQE_INVALID) {
					priv->s_tid_tail = qp->s_cur;
					priv->s_state = TID_OP(WRITE_RESP);
				}
			} else if (priv->s_tid_cur == priv->s_tid_head) {
				struct rvt_swqe *__w;
				struct tid_rdma_request *__r;

				__w = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
				__r = wqe_to_tid_req(__w);

				/*
				 * The s_tid_cur pointer is advanced to s_cur if
				 * any of the following conditions about the WQE
				 * to which s_tid_cur currently points are
				 * satisfied:
				 * 1. The request is not a TID RDMA WRITE
				 *    request,
				 * 2. The request is in the INACTIVE or
				 *    COMPLETE states (TID RDMA READ requests
				 *    stay at INACTIVE and TID RDMA WRITE
				 *    requests transition to COMPLETE when
				 *    done),
				 * 3. The request is in the ACTIVE or SYNC
				 *    state and the number of completed
				 *    segments is equal to the total segment
				 *    count.
				 *    (If ACTIVE, the request is waiting for
				 *    ACKs. If SYNC, the request has not
				 *    received any responses because it's
				 *    waiting on a sync point.)
				 */
				if (__w->wr.opcode != IB_WR_TID_RDMA_WRITE ||
				    __r->state == TID_REQUEST_INACTIVE ||
				    __r->state == TID_REQUEST_COMPLETE ||
				    ((__r->state == TID_REQUEST_ACTIVE ||
				      __r->state == TID_REQUEST_SYNC) &&
				     __r->comp_seg == __r->total_segs)) {
					if (priv->s_tid_tail ==
					    priv->s_tid_cur &&
					    priv->s_state ==
					    TID_OP(WRITE_DATA_LAST)) {
						priv->s_tid_tail = qp->s_cur;
						priv->s_state =
							TID_OP(WRITE_RESP);
					}
					priv->s_tid_cur = qp->s_cur;
				}
				/*
				 * A corner case: when the last TID RDMA WRITE
				 * request was completed, s_tid_head,
				 * s_tid_cur, and s_tid_tail all point to the
				 * same location. Other requests are posted and
				 * s_cur wraps around to the same location,
				 * where a new TID RDMA WRITE is posted. In
				 * this case, none of the indices need to be
				 * updated. However, priv->s_state still needs
				 * to be updated.
				 */
				if (priv->s_tid_tail == qp->s_cur &&
				    priv->s_state == TID_OP(WRITE_DATA_LAST))
					priv->s_state = TID_OP(WRITE_RESP);
			}
			req = wqe_to_tid_req(wqe);
			if (newreq) {
				priv->s_tid_head = qp->s_cur;
				priv->pending_tid_w_resp += req->total_segs;
				atomic_inc(&priv->n_tid_requests);
				atomic_dec(&priv->n_requests);
			} else {
				req->state = TID_REQUEST_RESEND;
				req->comp_seg = delta_psn(bth2, wqe->psn);
				/*
				 * Pull back any segments since we are going
				 * to re-receive them.
				 */
				req->setup_head = req->clear_tail;
				priv->pending_tid_w_resp +=
					delta_psn(wqe->lpsn, bth2) + 1;
			}

			trace_hfi1_tid_write_sender_make_req(qp, newreq);
			trace_hfi1_tid_req_make_req_write(qp, newreq,
							  wqe->wr.opcode,
							  wqe->psn, wqe->lpsn,
							  req);
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (qp->s_num_rd_atomic >=
			    qp->s_max_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_RDMAR;
				goto bail;
			}
			qp->s_num_rd_atomic++;
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_TID_RDMA_READ:
			trace_hfi1_tid_read_sender_make_req(qp, newreq);
			wpriv = wqe->priv;
			req = wqe_to_tid_req(wqe);
			trace_hfi1_tid_req_make_req_read(qp, newreq,
							 wqe->wr.opcode,
							 wqe->psn, wqe->lpsn,
							 req);
			delta = cmp_psn(qp->s_psn, wqe->psn);

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * Don't allow more operations to be started
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * than the QP limits allow. We could get here under
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 			 * three conditions: (1) It's a new request; (2) We are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * sending the second or later segment of a request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * but the qp->s_state is set to OP(RDMA_READ_REQUEST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * when the last segment of a previous request is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * received just before this; (3) We are re-sending a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) qp->s_flags |= RVT_S_WAIT_RDMAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (newreq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct tid_rdma_flow *flow =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) &req->flows[req->setup_head];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 				 * Set up s_sge, since it is needed for TID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 				 * allocation. However, if the pages have already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 				 * been walked and mapped, skip it: an earlier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 				 * attempt failed to allocate the TID entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (!flow->npagesets) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) qp->s_sge.sge = wqe->sg_list[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) qp->s_sge.sg_list = wqe->sg_list + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) qp->s_sge.num_sge = wqe->wr.num_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) qp->s_sge.total_len = wqe->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) qp->s_len = wqe->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) req->isge = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) req->clear_tail = req->setup_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) req->flow_idx = req->setup_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) req->state = TID_REQUEST_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) } else if (delta == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) /* Re-send a request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) req->cur_seg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) req->comp_seg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) req->ack_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) req->flow_idx = req->clear_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) req->state = TID_REQUEST_RESEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) req->s_next_psn = qp->s_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) /* Read one segment at a time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) len = min_t(u32, req->seg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) wqe->length - req->seg_len * req->cur_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) &bth1, &bth2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (delta <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) /* Wait for TID space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) qp->s_lsn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) hwords += delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) ss = &wpriv->ss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) /* Check if this is the last segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (req->cur_seg >= req->total_segs &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) ++qp->s_cur == qp->s_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) qp->s_cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) case IB_WR_ATOMIC_CMP_AND_SWP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) case IB_WR_ATOMIC_FETCH_AND_ADD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * Don't allow more operations to be started
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * than the QP limits allow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (qp->s_num_rd_atomic >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) qp->s_max_rd_atomic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) qp->s_flags |= RVT_S_WAIT_RDMAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) qp->s_num_rd_atomic++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) case IB_WR_OPFN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) qp->s_lsn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) wqe->wr.opcode == IB_WR_OPFN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) qp->s_state = OP(COMPARE_SWAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) put_ib_ateth_swap(wqe->atomic_wr.swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) &ohdr->u.atomic_eth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) put_ib_ateth_compare(wqe->atomic_wr.compare_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) &ohdr->u.atomic_eth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) qp->s_state = OP(FETCH_ADD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) put_ib_ateth_swap(wqe->atomic_wr.compare_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) &ohdr->u.atomic_eth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) &ohdr->u.atomic_eth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) ohdr->u.atomic_eth.rkey = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) wqe->atomic_wr.rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) ss = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) bth2 |= IB_BTH_REQ_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (++qp->s_cur == qp->s_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) qp->s_cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
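		/*
		 * TID RDMA READ builds its own SGE state (wpriv->ss) in the
		 * code above; every other opcode copies the WQE's SG list
		 * into the QP's generic send SGE state here.
		 */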
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) qp->s_sge.sge = wqe->sg_list[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) qp->s_sge.sg_list = wqe->sg_list + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) qp->s_sge.num_sge = wqe->wr.num_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) qp->s_sge.total_len = wqe->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) qp->s_len = wqe->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (newreq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) qp->s_tail++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (qp->s_tail >= qp->s_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) qp->s_tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (wqe->wr.opcode == IB_WR_RDMA_READ ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) qp->s_psn = wqe->lpsn + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) else if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) qp->s_psn = req->s_next_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) qp->s_psn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) case OP(RDMA_READ_RESPONSE_FIRST):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * qp->s_state is normally set to the opcode of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * last packet constructed for new requests and therefore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * is never set to RDMA read response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * thread to indicate a SEND needs to be restarted from an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * earlier PSN without interfering with the sending thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) 		 * See hfi1_restart_rc().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) case OP(SEND_FIRST):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) qp->s_state = OP(SEND_MIDDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) case OP(SEND_MIDDLE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) bth2 = mask_psn(qp->s_psn++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) ss = &qp->s_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) len = qp->s_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (len > pmtu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) len = pmtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) middle = HFI1_CAP_IS_KSET(SDMA_AHG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (wqe->wr.opcode == IB_WR_SEND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) qp->s_state = OP(SEND_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) /* Immediate data comes after the BTH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) ohdr->u.imm_data = wqe->wr.ex.imm_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) hwords += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) /* invalidate data comes after the BTH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) hwords += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (wqe->wr.send_flags & IB_SEND_SOLICITED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) bth0 |= IB_BTH_SOLICITED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) bth2 |= IB_BTH_REQ_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) qp->s_cur++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (qp->s_cur >= qp->s_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) qp->s_cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) case OP(RDMA_READ_RESPONSE_LAST):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * qp->s_state is normally set to the opcode of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * last packet constructed for new requests and therefore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * is never set to RDMA read response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * RDMA_READ_RESPONSE_LAST is used by the ACK processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) 		 * thread to indicate an RDMA write needs to be restarted from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) 		 * an earlier PSN without interfering with the sending thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) 		 * See hfi1_restart_rc().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) case OP(RDMA_WRITE_FIRST):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) qp->s_state = OP(RDMA_WRITE_MIDDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) case OP(RDMA_WRITE_MIDDLE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) bth2 = mask_psn(qp->s_psn++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) ss = &qp->s_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) len = qp->s_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (len > pmtu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) len = pmtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) middle = HFI1_CAP_IS_KSET(SDMA_AHG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) qp->s_state = OP(RDMA_WRITE_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) /* Immediate data comes after the BTH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) ohdr->u.imm_data = wqe->wr.ex.imm_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) hwords += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (wqe->wr.send_flags & IB_SEND_SOLICITED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) bth0 |= IB_BTH_SOLICITED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) bth2 |= IB_BTH_REQ_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) qp->s_cur++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (qp->s_cur >= qp->s_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) qp->s_cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) case OP(RDMA_READ_RESPONSE_MIDDLE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * qp->s_state is normally set to the opcode of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * last packet constructed for new requests and therefore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * is never set to RDMA read response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		 * thread to indicate an RDMA read needs to be restarted from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		 * an earlier PSN without interfering with the sending thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		 * See hfi1_restart_rc().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) put_ib_reth_vaddr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) wqe->rdma_wr.remote_addr + len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) &ohdr->u.rc.reth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) ohdr->u.rc.reth.rkey =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) cpu_to_be32(wqe->rdma_wr.rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) qp->s_state = OP(RDMA_READ_REQUEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) qp->s_psn = wqe->lpsn + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) ss = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) qp->s_cur++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (qp->s_cur == qp->s_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) qp->s_cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) case TID_OP(WRITE_RESP):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * This value for s_state is used for restarting a TID RDMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	 * WRITE request. See the comment for OP(RDMA_READ_RESPONSE_MIDDLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	 * above for more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) req = wqe_to_tid_req(wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) req->state = TID_REQUEST_RESEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) remote = rcu_dereference(priv->tid_rdma.remote);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) req->comp_seg = delta_psn(qp->s_psn, wqe->psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) len = wqe->length - (req->comp_seg * remote->max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
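		/*
		 * Restart the TID RDMA WRITE at the segment corresponding to
		 * qp->s_psn: comp_seg counts the segments already completed
		 * and len is the data that still has to be transferred.
		 */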
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) bth2 = mask_psn(qp->s_psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr, &bth1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) &bth2, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) qp->s_psn = wqe->lpsn + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) ss = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) qp->s_state = TID_OP(WRITE_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) priv->pending_tid_w_resp += delta_psn(wqe->lpsn, bth2) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) priv->s_tid_cur = qp->s_cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (++qp->s_cur == qp->s_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) qp->s_cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) trace_hfi1_tid_req_make_req_write(qp, 0, wqe->wr.opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) wqe->psn, wqe->lpsn, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) case TID_OP(READ_RESP):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /* This is used to restart a TID read request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) req = wqe_to_tid_req(wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) wpriv = wqe->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * Back down. The field qp->s_psn has been set to the psn with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		 * which the request should be restarted. It's OK to use division
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * as this is on the retry path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) req->cur_seg = delta_psn(qp->s_psn, wqe->psn) / priv->pkts_ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		 * The following function needs to be redefined to return the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * status to make sure that we find the flow. At the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * time, we can use the req->state change to check if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * call succeeds or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) req->state = TID_REQUEST_RESEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (req->state != TID_REQUEST_ACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * Failed to find the flow. Release all allocated tid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * resources.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) hfi1_kern_exp_rcv_clear_all(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) hfi1_kern_clear_hw_flow(priv->rcd, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) hfi1_trdma_send_complete(qp, wqe, IB_WC_LOC_QP_OP_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) req->state = TID_REQUEST_RESEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) len = min_t(u32, req->seg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) wqe->length - req->seg_len * req->cur_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) flow = &req->flows[req->flow_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) len -= flow->sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) req->s_next_psn = flow->flow_state.ib_lpsn + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) delta = hfi1_build_tid_rdma_read_packet(wqe, ohdr, &bth1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) &bth2, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (delta <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /* Wait for TID space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) hwords += delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) ss = &wpriv->ss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /* Check if this is the last segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (req->cur_seg >= req->total_segs &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) ++qp->s_cur == qp->s_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) qp->s_cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) qp->s_psn = req->s_next_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) wqe->psn, wqe->lpsn, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) case TID_OP(READ_REQ):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) req = wqe_to_tid_req(wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) delta = cmp_psn(qp->s_psn, wqe->psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * If the current WR is not TID RDMA READ, or this is the start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * of a new request, we need to change the qp->s_state so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * the request can be set up properly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (wqe->wr.opcode != IB_WR_TID_RDMA_READ || delta == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) qp->s_cur == qp->s_tail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) qp->s_state = OP(RDMA_READ_REQUEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (delta == 0 || qp->s_cur == qp->s_tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) goto check_s_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		/* Don't exceed the QP's RDMA READ/atomic limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) qp->s_flags |= RVT_S_WAIT_RDMAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) wpriv = wqe->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /* Read one segment at a time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) len = min_t(u32, req->seg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) wqe->length - req->seg_len * req->cur_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr, &bth1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) &bth2, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) if (delta <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) /* Wait for TID space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) hwords += delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) ss = &wpriv->ss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /* Check if this is the last segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (req->cur_seg >= req->total_segs &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) ++qp->s_cur == qp->s_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) qp->s_cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) qp->s_psn = req->s_next_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) wqe->psn, wqe->lpsn, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
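	/*
	 * Record the highest PSN being sent and periodically request an ACK
	 * (every HFI1_PSN_CREDIT packets within a request, except for TID
	 * RDMA WRITE). If RVT_S_SEND_ONE is set, request an ACK on this
	 * packet and wait for it before sending more.
	 */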
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) qp->s_sending_hpsn = bth2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) delta = delta_psn(bth2, wqe->psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (delta && delta % HFI1_PSN_CREDIT == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) bth2 |= IB_BTH_REQ_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (qp->s_flags & RVT_S_SEND_ONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) qp->s_flags &= ~RVT_S_SEND_ONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) qp->s_flags |= RVT_S_WAIT_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) bth2 |= IB_BTH_REQ_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) qp->s_len -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) ps->s_txreq->hdr_dwords = hwords;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) ps->s_txreq->sde = priv->s_sde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) ps->s_txreq->ss = ss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) ps->s_txreq->s_cur_size = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) hfi1_make_ruc_header(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) ohdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) bth0 | (qp->s_state << 24),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) bth1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) bth2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) middle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) ps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) done_free_tx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) hfi1_put_txreq(ps->s_txreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) ps->s_txreq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) hfi1_put_txreq(ps->s_txreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) bail_no_tx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) ps->s_txreq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) qp->s_flags &= ~RVT_S_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * If we didn't get a txreq, the QP will be woken up later to try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * again. Set the flags to indicate which work item to wake
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
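/*
 * Fill in the BTH and AETH of an ACK: the AETH carries the pending NAK
 * state if there is one, otherwise the credit/MSN value computed by
 * rvt_compute_aeth().
 */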
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) static inline void hfi1_make_bth_aeth(struct rvt_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) struct ib_other_headers *ohdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) u32 bth0, u32 bth1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (qp->r_nak_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) (qp->r_nak_state <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) IB_AETH_CREDIT_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) ohdr->u.aeth = rvt_compute_aeth(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) ohdr->bth[0] = cpu_to_be32(bth0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) ohdr->bth[1] = cpu_to_be32(bth1 | qp->remote_qpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
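/*
 * Defer the ACK to the send engine: mark an ACK/response as pending on the
 * QP and schedule the send so the ACK goes out through the normal send path
 * rather than being built inline.
 */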
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static inline void hfi1_queue_rc_ack(struct hfi1_packet *packet, bool is_fecn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) struct rvt_qp *qp = packet->qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) struct hfi1_ibport *ibp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) spin_lock_irqsave(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) ibp = rcd_to_iport(packet->rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) this_cpu_inc(*ibp->rvp.rc_qacks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) qp->s_nak_state = qp->r_nak_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) qp->s_ack_psn = qp->r_ack_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (is_fecn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) qp->s_flags |= RVT_S_ECN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) /* Schedule the send tasklet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) hfi1_schedule_send(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) spin_unlock_irqrestore(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
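/* Build the 9B LRH/BTH/AETH (and GRH if required) for an RC ACK. */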
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static inline void hfi1_make_rc_ack_9B(struct hfi1_packet *packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) struct hfi1_opa_header *opa_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) u8 sc5, bool is_fecn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) u64 *pbc_flags, u32 *hwords,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) u32 *nwords)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) struct rvt_qp *qp = packet->qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) struct ib_header *hdr = &opa_hdr->ibh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) struct ib_other_headers *ohdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) u16 lrh0 = HFI1_LRH_BTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) u16 pkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) u32 bth0, bth1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) opa_hdr->hdr_type = HFI1_PKT_TYPE_9B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) ohdr = &hdr->u.oth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) *hwords = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) rdma_ah_read_grh(&qp->remote_ah_attr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) *hwords - 2, SIZE_OF_CRC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) ohdr = &hdr->u.l.oth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) lrh0 = HFI1_LRH_GRH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) *pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	/* read pkey_index w/o lock (it's atomic) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) lrh0 |= (sc5 & IB_SC_MASK) << IB_SC_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) (rdma_ah_get_sl(&qp->remote_ah_attr) & IB_SL_MASK) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) IB_SL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) hfi1_make_ib_hdr(hdr, lrh0, *hwords + SIZE_OF_CRC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (qp->s_mig_state == IB_MIG_MIGRATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) bth0 |= IB_BTH_MIG_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) bth1 = (!!is_fecn) << IB_BECN_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * Inline ACKs go out without the use of the Verbs send engine, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * we need to set the STL Verbs Extended bit here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) bth1 |= HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
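/* Build the 16B LRH/BTH/AETH (and GRH if required) for an RC ACK. */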
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) static inline void hfi1_make_rc_ack_16B(struct hfi1_packet *packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) struct hfi1_opa_header *opa_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) u8 sc5, bool is_fecn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) u64 *pbc_flags, u32 *hwords,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) u32 *nwords)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) struct rvt_qp *qp = packet->qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) struct hfi1_16b_header *hdr = &opa_hdr->opah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) struct ib_other_headers *ohdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) u32 bth0, bth1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) u16 len, pkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) bool becn = is_fecn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) u8 l4 = OPA_16B_L4_IB_LOCAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) u8 extra_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) opa_hdr->hdr_type = HFI1_PKT_TYPE_16B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) ohdr = &hdr->u.oth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) /* header size in 32-bit words 16B LRH+BTH+AETH = (16+12+4)/4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) *hwords = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) extra_bytes = hfi1_get_16b_padding(*hwords << 2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) *nwords = SIZE_OF_CRC + ((extra_bytes + SIZE_OF_LT) >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) rdma_ah_read_grh(&qp->remote_ah_attr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) *hwords - 4, *nwords);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) ohdr = &hdr->u.l.oth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) l4 = OPA_16B_L4_IB_GLOBAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) *pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	/* read pkey_index w/o lock (it's atomic) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) /* Convert dwords to flits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) len = (*hwords + *nwords) >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) hfi1_make_16b_hdr(hdr, ppd->lid |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) ((1 << ppd->lmc) - 1)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 16B), len, pkey, becn, 0, l4, sc5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) bth0 |= extra_bytes << 20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (qp->s_mig_state == IB_MIG_MIGRATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) bth1 = OPA_BTH_MIG_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) typedef void (*hfi1_make_rc_ack)(struct hfi1_packet *packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) struct hfi1_opa_header *opa_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) u8 sc5, bool is_fecn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) u64 *pbc_flags, u32 *hwords,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) u32 *nwords);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) /* We support only two types - 9B and 16B for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) static const hfi1_make_rc_ack hfi1_make_rc_ack_tbl[2] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) [HFI1_PKT_TYPE_9B] = &hfi1_make_rc_ack_9B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) [HFI1_PKT_TYPE_16B] = &hfi1_make_rc_ack_16B
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * hfi1_send_rc_ack - Construct an ACK packet and send it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)  * @packet: the packet whose receipt is being acknowledged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * Note that RDMA reads and atomics are handled in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) * send side QP state and send engine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) struct hfi1_ctxtdata *rcd = packet->rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) struct rvt_qp *qp = packet->qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) struct hfi1_ibport *ibp = rcd_to_iport(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) struct hfi1_qp_priv *priv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) u64 pbc, pbc_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) u32 hwords = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) u32 nwords = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) u32 plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) struct pio_buf *pbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) struct hfi1_opa_header opa_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) /* clear the defer count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) qp->r_adefered = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	/* Don't send ACK or NAK if an RDMA read or atomic is pending. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (qp->s_flags & RVT_S_RESP_PENDING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) hfi1_queue_rc_ack(packet, is_fecn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) /* Ensure s_rdma_ack_cnt changes are committed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (qp->s_rdma_ack_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) hfi1_queue_rc_ack(packet, is_fecn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) /* Don't try to send ACKs if the link isn't ACTIVE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (driver_lstate(ppd) != IB_PORT_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) /* Make the appropriate header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) hfi1_make_rc_ack_tbl[priv->hdr_type](packet, &opa_hdr, sc5, is_fecn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) &pbc_flags, &hwords, &nwords);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) plen = 2 /* PBC */ + hwords + nwords;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) sc_to_vlt(ppd->dd, sc5), plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (IS_ERR_OR_NULL(pbuf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) * We have no room to send at the moment. Pass
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) * responsibility for sending the ACK to the send engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) * so that when enough buffer space becomes available,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * the ACK is sent ahead of other outgoing packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) hfi1_queue_rc_ack(packet, is_fecn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) &opa_hdr, ib_is_sc5(sc5));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) /* write the pbc and data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) (priv->hdr_type == HFI1_PKT_TYPE_9B ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) (void *)&opa_hdr.ibh :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) (void *)&opa_hdr.opah), hwords);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) * update_num_rd_atomic - update the qp->s_num_rd_atomic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * @qp: the QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) * @psn: the packet sequence number to restart at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) * @wqe: the wqe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) * This is called from reset_psn() to update qp->s_num_rd_atomic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) * for the current wqe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) * Called at interrupt level with the QP s_lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) static void update_num_rd_atomic(struct rvt_qp *qp, u32 psn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) struct rvt_swqe *wqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) u32 opcode = wqe->wr.opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) if (opcode == IB_WR_RDMA_READ ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) qp->s_num_rd_atomic++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) } else if (opcode == IB_WR_TID_RDMA_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) struct tid_rdma_request *req = wqe_to_tid_req(wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) struct hfi1_qp_priv *priv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) if (cmp_psn(psn, wqe->lpsn) <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) u32 cur_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) cur_seg = (psn - wqe->psn) / priv->pkts_ps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) req->ack_pending = cur_seg - req->comp_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) priv->pending_tid_r_segs += req->ack_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) qp->s_num_rd_atomic += req->ack_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) trace_hfi1_tid_req_update_num_rd_atomic(qp, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) wqe->wr.opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) wqe->psn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) wqe->lpsn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) priv->pending_tid_r_segs += req->total_segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) qp->s_num_rd_atomic += req->total_segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) * reset_psn - reset the QP state to send starting from PSN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * @qp: the QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) * @psn: the packet sequence number to restart at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) * This is called from hfi1_rc_rcv() to process an incoming RC ACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) * for the given QP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) * Called at interrupt level with the QP s_lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) static void reset_psn(struct rvt_qp *qp, u32 psn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) u32 n = qp->s_acked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) u32 opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) struct hfi1_qp_priv *priv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) lockdep_assert_held(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) qp->s_cur = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) priv->pending_tid_r_segs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) priv->pending_tid_w_resp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) qp->s_num_rd_atomic = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) * If we are starting the request from the beginning,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) * let the normal send code handle initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (cmp_psn(psn, wqe->psn) <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) qp->s_state = OP(SEND_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) update_num_rd_atomic(qp, psn, wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) /* Find the work request opcode corresponding to the given PSN. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) int diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (++n == qp->s_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (n == qp->s_tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) wqe = rvt_get_swqe_ptr(qp, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) diff = cmp_psn(psn, wqe->psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) if (diff < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 			/* Point wqe back to the previous one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) qp->s_cur = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) * If we are starting the request from the beginning,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) * let the normal send code handle initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) if (diff == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) qp->s_state = OP(SEND_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) update_num_rd_atomic(qp, psn, wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) opcode = wqe->wr.opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) * Set the state to restart in the middle of a request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * Don't change the s_sge, s_cur_sge, or s_cur_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) * See hfi1_make_rc_req().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) switch (opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) case IB_WR_SEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) case IB_WR_SEND_WITH_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) case IB_WR_RDMA_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) case IB_WR_RDMA_WRITE_WITH_IMM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) case IB_WR_TID_RDMA_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) qp->s_state = TID_OP(WRITE_RESP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) case IB_WR_RDMA_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) case IB_WR_TID_RDMA_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) qp->s_state = TID_OP(READ_RESP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) * This case shouldn't happen since there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) * only one PSN per request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) qp->s_state = OP(SEND_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) qp->s_psn = psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) * asynchronously before the send engine can get scheduled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) * Doing it in hfi1_make_rc_req() is too late.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) qp->s_flags |= RVT_S_WAIT_PSN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) qp->s_flags &= ~HFI1_S_AHG_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) trace_hfi1_sender_reset_psn(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) * Back up requester to resend the last un-ACKed request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) * The QP r_lock and s_lock should be held and interrupts disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) struct hfi1_qp_priv *priv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) struct hfi1_ibport *ibp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) lockdep_assert_held(&qp->r_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) lockdep_assert_held(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) trace_hfi1_sender_restart_rc(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if (qp->s_retry == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) if (qp->s_mig_state == IB_MIG_ARMED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) hfi1_migrate_qp(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) qp->s_retry = qp->s_retry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) } else if (qp->s_last == qp->s_acked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) * We need special handling for the OPFN request WQEs as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) * they are not allowed to generate real user errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (wqe->wr.opcode == IB_WR_OPFN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) struct hfi1_ibport *ibp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) to_iport(qp->ibqp.device, qp->port_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) * Call opfn_conn_reply() with capcode and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) * remaining data as 0 to close out the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) * current request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) opfn_conn_reply(qp, priv->opfn.curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) wqe = do_rc_completion(qp, wqe, ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) qp->s_flags &= ~RVT_S_WAIT_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) trace_hfi1_tid_write_sender_restart_rc(qp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) struct tid_rdma_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) req = wqe_to_tid_req(wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) hfi1_kern_exp_rcv_clear_all(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) hfi1_kern_clear_hw_flow(priv->rcd, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) hfi1_trdma_send_complete(qp, wqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) IB_WC_RETRY_EXC_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) } else { /* need to handle delayed completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) qp->s_retry--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) ibp = to_iport(qp->ibqp.device, qp->port_num);
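/*
* Resend accounting: an RDMA READ or TID RDMA READ is retried as a
* single request, so count one resend; for other opcodes count every
* PSN between the restart point and the current send PSN.
*/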
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) if (wqe->wr.opcode == IB_WR_RDMA_READ ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) wqe->wr.opcode == IB_WR_TID_RDMA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) ibp->rvp.n_rc_resends++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) RVT_S_WAIT_ACK | HFI1_S_WAIT_TID_RESP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) if (wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) qp->s_flags |= RVT_S_SEND_ONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) reset_psn(qp, psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) * Set qp->s_sending_psn to the next PSN after the given one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) * This would be psn+1 except when RDMA reads or TID RDMA ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) * are present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) struct rvt_swqe *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) u32 n = qp->s_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) lockdep_assert_held(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) /* Find the work request corresponding to the given PSN. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) wqe = rvt_get_swqe_ptr(qp, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (cmp_psn(psn, wqe->lpsn) <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) if (wqe->wr.opcode == IB_WR_RDMA_READ ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) qp->s_sending_psn = wqe->lpsn + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) qp->s_sending_psn = psn + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (++n == qp->s_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) if (n == qp->s_tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) * hfi1_rc_verbs_aborted - handle abort status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) * @qp: the QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) * @opah: the opa header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) * This code modifies both the ACK bit in BTH[2]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) * and the s_flags so the QP goes into send-one mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) * This serves to throttle the send engine to only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) * send a single packet in the likely case that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) * a link has gone down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) struct ib_other_headers *ohdr = hfi1_get_rc_ohdr(opah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) u8 opcode = ib_bth_get_opcode(ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) u32 psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) /* ignore responses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) opcode == TID_OP(READ_RESP) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) opcode == TID_OP(WRITE_RESP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
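/*
* Mark the aborted packet's PSN with the ACK-request bit so that the
* single packet resent in send-one mode solicits a response.
*/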
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) psn = ib_bth_get_psn(ohdr) | IB_BTH_REQ_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) ohdr->bth[2] = cpu_to_be32(psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) qp->s_flags |= RVT_S_SEND_ONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) * This should be called with the QP s_lock held and interrupts disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) struct ib_other_headers *ohdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) struct hfi1_qp_priv *priv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) struct rvt_swqe *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) u32 opcode, head, tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) u32 psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) struct tid_rdma_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) lockdep_assert_held(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) ohdr = hfi1_get_rc_ohdr(opah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) opcode = ib_bth_get_opcode(ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) opcode == TID_OP(READ_RESP) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) opcode == TID_OP(WRITE_RESP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) WARN_ON(!qp->s_rdma_ack_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) qp->s_rdma_ack_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) psn = ib_bth_get_psn(ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) * Don't attempt to reset the sending PSN for packets in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) * KDETH PSN space since the PSN does not match anything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (opcode != TID_OP(WRITE_DATA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) opcode != TID_OP(WRITE_DATA_LAST) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) opcode != TID_OP(ACK) && opcode != TID_OP(RESYNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) reset_sending_psn(qp, psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) /* Handle TID RDMA WRITE packets differently */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) if (opcode >= TID_OP(WRITE_REQ) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) opcode <= TID_OP(WRITE_DATA_LAST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) head = priv->s_tid_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) tail = priv->s_tid_cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) * s_tid_cur is set to s_tid_head in the case where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) * a new TID RDMA request is being started and all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) * previous ones have been completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) * Therefore, we need to do a secondary check in order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) * to properly determine whether we should start the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) * RC timer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) wqe = rvt_get_swqe_ptr(qp, tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) req = wqe_to_tid_req(wqe);
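/*
* If the newly started request still has uncompleted segments, back
* tail up one slot (with ring wrap) so the "tail != head" check below
* can still arm the retry timer.
*/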
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) if (head == tail && req->comp_seg < req->total_segs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) if (tail == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) tail = qp->s_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) tail -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) head = qp->s_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) tail = qp->s_acked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) * Start timer after a packet requesting an ACK has been sent and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) * there are still requests that haven't been acked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) if ((psn & IB_BTH_REQ_ACK) && tail != head &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) opcode != TID_OP(WRITE_DATA) && opcode != TID_OP(WRITE_DATA_LAST) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) opcode != TID_OP(RESYNC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) !(qp->s_flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) if (opcode == TID_OP(READ_REQ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) rvt_add_retry_timer_ext(qp, priv->timeout_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) rvt_add_retry_timer(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) /* Start TID RDMA ACK timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) if ((opcode == TID_OP(WRITE_DATA) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) opcode == TID_OP(WRITE_DATA_LAST) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) opcode == TID_OP(RESYNC)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) (psn & IB_BTH_REQ_ACK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) !(priv->s_flags & HFI1_S_TID_RETRY_TIMER) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) * The TID RDMA ACK packet could be received before this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) * function is called. Therefore, add the timer only if TID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) * RDMA ACK packets are actually pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) req = wqe_to_tid_req(wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) req->ack_seg < req->cur_seg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) hfi1_add_tid_retry_timer(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
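/*
* Complete ACKed WQEs (from s_last up to s_acked) whose last packet
* has already cleared the send engine's in-flight PSN window.
*/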
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) while (qp->s_last != qp->s_acked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) wqe = rvt_get_swqe_ptr(qp, qp->s_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) trdma_clean_swqe(qp, wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) rvt_qp_complete_swqe(qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) wqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) ib_hfi1_wc_opcode[wqe->wr.opcode],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) IB_WC_SUCCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) * If we were waiting for sends to complete before re-sending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) * and they are now complete, restart sending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) trace_hfi1_sendcomplete(qp, psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (qp->s_flags & RVT_S_WAIT_PSN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) qp->s_flags &= ~RVT_S_WAIT_PSN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) qp->s_sending_psn = qp->s_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) qp->s_sending_hpsn = qp->s_psn - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) hfi1_schedule_send(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) qp->s_last_psn = psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) * Generate a SWQE completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) * This is similar to hfi1_send_complete but has to check to be sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) * that the SGEs are not being referenced if the SWQE is being resent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) struct rvt_swqe *wqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) struct hfi1_ibport *ibp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) struct hfi1_qp_priv *priv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) lockdep_assert_held(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) * Don't decrement refcount and don't generate a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * completion if the SWQE is being resent until the send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) * is finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) trace_hfi1_rc_completion(qp, wqe->lpsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) trdma_clean_swqe(qp, wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) rvt_qp_complete_swqe(qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) wqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) ib_hfi1_wc_opcode[wqe->wr.opcode],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) IB_WC_SUCCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) this_cpu_inc(*ibp->rvp.rc_delayed_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) * If send progress is not running, attempt to progress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) * the SDMA queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) struct sdma_engine *engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) u8 sl = rdma_ah_get_sl(&qp->remote_ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) u8 sc5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) /* For now use sc to find engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) sc5 = ibp->sl_to_sc[sl];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) engine = qp_to_sdma_engine(qp, sc5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) sdma_engine_progress_schedule(engine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) qp->s_retry = qp->s_retry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) * Don't update the last PSN if the request being completed is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) * a TID RDMA WRITE request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) * Completion of TID RDMA WRITE requests is done by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) * TID RDMA ACKs and as such could be for a request that has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) * already been ACKed as far as the IB state machine is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) * concerned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) update_last_psn(qp, wqe->lpsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) * If we are completing a request which is in the process of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) * being resent, we can stop re-sending it since we know the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) * responder has already seen it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) if (qp->s_acked == qp->s_cur) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) if (++qp->s_cur >= qp->s_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) qp->s_cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) qp->s_acked = qp->s_cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) if (qp->s_acked != qp->s_tail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) qp->s_state = OP(SEND_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) qp->s_psn = wqe->psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) if (++qp->s_acked >= qp->s_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) qp->s_acked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) qp->s_draining = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) if (priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) hfi1_schedule_send(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) return wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
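/*
* Schedule the QP to restart from the last ACKed PSN: set the restart
* flag, back up the send state, and queue the QP on the receive
* context's wait list unless a restart is already pending.
*/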
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) static void set_restart_qp(struct rvt_qp *qp, struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) /* Retry this request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) qp->r_flags |= RVT_R_RDMAR_SEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) if (list_empty(&qp->rspwait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) qp->r_flags |= RVT_R_RSP_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) rvt_get_qp(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) * update_qp_retry_state - Update qp retry state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) * @qp: the QP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) * @psn: the packet sequence number of the TID RDMA WRITE RESP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) * @spsn: The start psn for the given TID RDMA WRITE swqe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) * @lpsn: The last psn for the given TID RDMA WRITE swqe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) * This function is called to update the qp retry state upon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) * receiving a TID WRITE RESP after the qp is scheduled to retry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) * a request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) static void update_qp_retry_state(struct rvt_qp *qp, u32 psn, u32 spsn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) u32 lpsn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) struct hfi1_qp_priv *qpriv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) qp->s_psn = psn + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) * If this is the first TID RDMA WRITE RESP packet for the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) * request, change the s_state so that the retry will be processed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * correctly. Similarly, if this is the last TID RDMA WRITE RESP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) * packet, change the s_state and advance the s_cur.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) if (cmp_psn(psn, lpsn) >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) qp->s_cur = qpriv->s_tid_cur + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) if (qp->s_cur >= qp->s_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) qp->s_cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) qp->s_state = TID_OP(WRITE_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) } else if (!cmp_psn(psn, spsn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) qp->s_cur = qpriv->s_tid_cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) qp->s_state = TID_OP(WRITE_RESP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) * do_rc_ack - process an incoming RC ACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) * @qp: the QP the ACK came in on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) * @psn: the packet sequence number of the ACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) * @opcode: the opcode of the request that resulted in the ACK
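* @aeth: the ACK extended transport header from the packet
* @val: the payload value carried by the response (e.g. the atomic result)
* @rcd: the receive context the ACK arrived on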
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) * This is called from rc_rcv_resp() to process an incoming RC ACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) * for the given QP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) * May be called at interrupt level, with the QP s_lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) * Returns 1 if OK, 0 if current operation should be aborted (NAK).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) u64 val, struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) struct hfi1_ibport *ibp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) enum ib_wc_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) struct hfi1_qp_priv *qpriv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) struct rvt_swqe *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) u32 ack_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) int diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) struct rvt_dev_info *rdi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) lockdep_assert_held(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) * Note that NAKs implicitly ACK outstanding SEND and RDMA write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) * requests and implicitly NAK RDMA read and atomic requests issued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) * before the NAK'ed request. The MSN won't include the NAK'ed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) * request but will include the ACK'ed request(s).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) ack_psn = psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) if (aeth >> IB_AETH_NAK_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) ack_psn--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) ibp = rcd_to_iport(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) * The MSN might be for a later WQE than the PSN indicates so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) * only complete WQEs that the PSN finishes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) * RDMA_READ_RESPONSE_ONLY is a special case since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) * we want to generate completion events for everything
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) * before the RDMA read, copy the data, then generate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) * the completion for the read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) if (wqe->wr.opcode == IB_WR_RDMA_READ &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) diff == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) goto bail_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * If this request is a RDMA read or atomic, and the ACK is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) * for a later operation, this ACK NAKs the RDMA read or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) * atomic. In other words, only a RDMA_READ_LAST or ONLY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) * can ACK a RDMA read and likewise for atomic ops. Note
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) * that the NAK case can only happen if relaxed ordering is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) * used and requests are sent after an RDMA read or atomic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) * is sent but before the response is received.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) (wqe->wr.opcode == IB_WR_TID_RDMA_READ &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) (opcode != TID_OP(READ_RESP) || diff != 0)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) (delta_psn(psn, qp->s_last_psn) != 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) set_restart_qp(qp, rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) * No need to process the ACK/NAK since we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) * restarting an earlier request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) goto bail_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) }
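/* Copy the value returned by an atomic operation into the requester's buffer. */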
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) u64 *vaddr = wqe->sg_list[0].vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) *vaddr = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) if (wqe->wr.opcode == IB_WR_OPFN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) opfn_conn_reply(qp, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) if (qp->s_num_rd_atomic &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) (wqe->wr.opcode == IB_WR_RDMA_READ ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) qp->s_num_rd_atomic--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) /* Restart sending task if fence is complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) !qp->s_num_rd_atomic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) qp->s_flags &= ~(RVT_S_WAIT_FENCE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) RVT_S_WAIT_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) hfi1_schedule_send(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) RVT_S_WAIT_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) hfi1_schedule_send(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) * TID RDMA WRITE requests will be completed by the TID RDMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) * ACK packet handler (see tid_rdma.c).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) wqe = do_rc_completion(qp, wqe, ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) if (qp->s_acked == qp->s_tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) trace_hfi1_rc_ack_do(qp, aeth, psn, wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) trace_hfi1_sender_do_rc_ack(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) switch (aeth >> IB_AETH_NAK_SHIFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) case 0: /* ACK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) this_cpu_inc(*ibp->rvp.rc_acks);
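/*
* For TID RDMA READ the retry timeout is scaled by timeout_shift;
* keep the timer armed only while segments still appear to be
* awaiting responses (ack_pending), otherwise stop all timers.
*/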
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (wqe_to_tid_req(wqe)->ack_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) rvt_mod_retry_timer_ext(qp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) qpriv->timeout_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) rvt_stop_rc_timers(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) } else if (qp->s_acked != qp->s_tail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) struct rvt_swqe *__w = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) if (qpriv->s_tid_cur != HFI1_QP_WQE_INVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) __w = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) * Stop timers if we've received all of the TID RDMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) * WRITE responses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) if (__w && __w->wr.opcode == IB_WR_TID_RDMA_WRITE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) opcode == TID_OP(WRITE_RESP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) * Normally, the loop above would correctly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) * process all WQEs from s_acked onward and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) * either complete them or check for correct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) * PSN sequencing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) * However, for TID RDMA, due to pipelining,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) * the response may not be for the request at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) * s_acked so the above loop would just be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) * skipped. This does not allow for checking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) * the PSN sequencing. It has to be done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) * separately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) if (cmp_psn(psn, qp->s_last_psn + 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) set_restart_qp(qp, rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) goto bail_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) * If the psn is being resent, stop the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) * resending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) if (qp->s_cur != qp->s_tail &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) cmp_psn(qp->s_psn, psn) <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) update_qp_retry_state(qp, psn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) __w->psn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) __w->lpsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) else if (--qpriv->pending_tid_w_resp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) rvt_mod_retry_timer(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) rvt_stop_rc_timers(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) * We are expecting more ACKs so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) * mod the retry timer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) rvt_mod_retry_timer(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) * We can stop re-sending the earlier packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) * and continue with the next packet the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) * receiver wants.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) if (cmp_psn(qp->s_psn, psn) <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) reset_psn(qp, psn + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) /* No more acks - kill all timers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) rvt_stop_rc_timers(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) if (cmp_psn(qp->s_psn, psn) <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) qp->s_state = OP(SEND_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) qp->s_psn = psn + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (qp->s_flags & RVT_S_WAIT_ACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) qp->s_flags &= ~RVT_S_WAIT_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) hfi1_schedule_send(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) rvt_get_credit(qp, aeth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) qp->s_rnr_retry = qp->s_rnr_retry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) qp->s_retry = qp->s_retry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) * If the current request is a TID RDMA WRITE request and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) * response is not a TID RDMA WRITE RESP packet, s_last_psn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) * can't be advanced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) opcode != TID_OP(WRITE_RESP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) cmp_psn(psn, wqe->psn) >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) update_last_psn(qp, psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) case 1: /* RNR NAK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) ibp->rvp.n_rnr_naks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) if (qp->s_acked == qp->s_tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) goto bail_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) if (qp->s_flags & RVT_S_WAIT_RNR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) goto bail_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) rdi = ib_to_rvt(qp->ibqp.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) if (!(rdi->post_parms[wqe->wr.opcode].flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) RVT_OPERATION_IGN_RNR_CNT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) if (qp->s_rnr_retry == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) status = IB_WC_RNR_RETRY_EXC_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) goto class_b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) }
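/*
* Per the IB spec, an RNR retry count of 7 means retry indefinitely,
* so only decrement finite, non-zero retry counts.
*/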
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) qp->s_rnr_retry--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) * The last valid PSN is the previous PSN. For TID RDMA WRITE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) * request, s_last_psn should be incremented only when a TID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) * RDMA WRITE RESP is received to avoid skipping lost TID RDMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) * WRITE RESP packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) reset_psn(qp, qp->s_last_psn + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) update_last_psn(qp, psn - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) reset_psn(qp, psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) rvt_stop_rc_timers(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) rvt_add_rnr_timer(qp, aeth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) case 3: /* NAK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) if (qp->s_acked == qp->s_tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) goto bail_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) /* The last valid PSN is the previous PSN. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) update_last_psn(qp, psn - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) IB_AETH_CREDIT_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) case 0: /* PSN sequence error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) ibp->rvp.n_seq_naks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) * Back up to the responder's expected PSN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) * Note that we might get a NAK in the middle of an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) * RDMA READ response which terminates the RDMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) * READ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) hfi1_restart_rc(qp, psn, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) hfi1_schedule_send(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) case 1: /* Invalid Request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) status = IB_WC_REM_INV_REQ_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) ibp->rvp.n_other_naks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) goto class_b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) case 2: /* Remote Access Error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) status = IB_WC_REM_ACCESS_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) ibp->rvp.n_other_naks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) goto class_b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) case 3: /* Remote Operation Error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) status = IB_WC_REM_OP_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) ibp->rvp.n_other_naks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) class_b:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) if (qp->s_last == qp->s_acked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) hfi1_kern_read_tid_flow_free(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) hfi1_trdma_send_complete(qp, wqe, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) /* Ignore other reserved NAK error codes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) goto reserved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) qp->s_retry = qp->s_retry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) qp->s_rnr_retry = qp->s_rnr_retry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) goto bail_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) default: /* 2: reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) reserved:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) /* Ignore reserved NAK codes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) goto bail_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) /* cannot be reached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) bail_stop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) rvt_stop_rc_timers(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) * We have seen an out of sequence RDMA read middle or last packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) struct rvt_swqe *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) lockdep_assert_held(&qp->s_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) /* Remove QP from retry timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) rvt_stop_rc_timers(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) while (cmp_psn(psn, wqe->lpsn) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) if (wqe->wr.opcode == IB_WR_RDMA_READ ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) wqe->wr.opcode == IB_WR_TID_RDMA_WRITE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) wqe = do_rc_completion(qp, wqe, ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) ibp->rvp.n_rdma_seq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) qp->r_flags |= RVT_R_RDMAR_SEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) if (list_empty(&qp->rspwait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) qp->r_flags |= RVT_R_RSP_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) rvt_get_qp(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) * rc_rcv_resp - process an incoming RC response packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) * @packet: data packet information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) * This is called from hfi1_rc_rcv() to process an incoming RC response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) * packet for the given QP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) * Called at interrupt level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) static void rc_rcv_resp(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) struct hfi1_ctxtdata *rcd = packet->rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) void *data = packet->payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) u32 tlen = packet->tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) struct rvt_qp *qp = packet->qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) struct hfi1_ibport *ibp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) struct ib_other_headers *ohdr = packet->ohdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) struct rvt_swqe *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) enum ib_wc_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) int diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) u32 aeth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) u32 psn = ib_bth_get_psn(packet->ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) u32 pmtu = qp->pmtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) u16 hdrsize = packet->hlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) u8 opcode = packet->opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) u8 pad = packet->pad;
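/*
 * Bytes beyond the payload: the pad bytes, the extra (LT) byte
 * carried by 16B packets, and the 4-byte ICRC.
 */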
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) spin_lock_irqsave(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) trace_hfi1_ack(qp, psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) /* Ignore invalid responses. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) goto ack_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) /* Ignore duplicate responses. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) diff = cmp_psn(psn, qp->s_last_psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) if (unlikely(diff <= 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) /* Update credits for "ghost" ACKs */
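/*
 * A "ghost" ACK has a PSN equal to the last one already ACKed
 * (diff == 0); it completes nothing new, but its AETH may still
 * release flow-control credits.
 */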
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) aeth = be32_to_cpu(ohdr->u.aeth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) rvt_get_credit(qp, aeth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) goto ack_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) * Skip everything other than the PSN we expect, if we are waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) * for a reply to a restarted RDMA read or atomic op.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) if (qp->r_flags & RVT_R_RDMAR_SEQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) goto ack_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) qp->r_flags &= ~RVT_R_RDMAR_SEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) if (unlikely(qp->s_acked == qp->s_tail))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) goto ack_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) status = IB_WC_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) switch (opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) case OP(ACKNOWLEDGE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) case OP(ATOMIC_ACKNOWLEDGE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) case OP(RDMA_READ_RESPONSE_FIRST):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) aeth = be32_to_cpu(ohdr->u.aeth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) if (opcode == OP(ATOMIC_ACKNOWLEDGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) opcode != OP(RDMA_READ_RESPONSE_FIRST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) goto ack_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) goto ack_op_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) * If this is a response to a resent RDMA read, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) * have to be careful to copy the data to the right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) * location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) wqe, psn, pmtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) goto read_middle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) case OP(RDMA_READ_RESPONSE_MIDDLE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) /* no AETH, no ACK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) goto ack_seq_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) goto ack_op_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) read_middle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) goto ack_len_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) if (unlikely(pmtu >= qp->s_rdma_read_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) goto ack_len_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) * We got a response, so update the timeout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) * 4.096 usec * (1 << qp->timeout).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) rvt_mod_retry_timer(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) if (qp->s_flags & RVT_S_WAIT_ACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) qp->s_flags &= ~RVT_S_WAIT_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) hfi1_schedule_send(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) qp->s_retry = qp->s_retry_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) * Update the RDMA receive state, but do the copy without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) * holding the lock or blocking interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) qp->s_rdma_read_len -= pmtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) update_last_psn(qp, psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) spin_unlock_irqrestore(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) rvt_copy_sge(qp, &qp->s_rdma_read_sge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) data, pmtu, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) case OP(RDMA_READ_RESPONSE_ONLY):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) aeth = be32_to_cpu(ohdr->u.aeth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) goto ack_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) * Check that the data size is >= 0 && <= pmtu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) * Remember to account for ICRC (4).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) if (unlikely(tlen < (hdrsize + extra_bytes)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) goto ack_len_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) * If this is a response to a resent RDMA read, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) * have to be careful to copy the data to the right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) * location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) wqe, psn, pmtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) goto read_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) case OP(RDMA_READ_RESPONSE_LAST):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) /* ACKs READ req. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) goto ack_seq_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) goto ack_op_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) * Check that the data size is >= 1 && <= pmtu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) * Remember to account for ICRC (4).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) if (unlikely(tlen <= (hdrsize + extra_bytes)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) goto ack_len_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) read_last:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) tlen -= hdrsize + extra_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) if (unlikely(tlen != qp->s_rdma_read_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) goto ack_len_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) aeth = be32_to_cpu(ohdr->u.aeth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) rvt_copy_sge(qp, &qp->s_rdma_read_sge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) data, tlen, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) WARN_ON(qp->s_rdma_read_sge.num_sge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) (void)do_rc_ack(qp, aeth, psn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) goto ack_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) ack_op_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) status = IB_WC_LOC_QP_OP_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) goto ack_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) ack_seq_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) ibp = rcd_to_iport(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) rdma_seq_err(qp, ibp, psn, rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) goto ack_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) ack_len_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) status = IB_WC_LOC_LEN_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) ack_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) if (qp->s_last == qp->s_acked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) rvt_send_complete(qp, wqe, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) ack_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) spin_unlock_irqrestore(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)
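/*
 * rc_cancel_ack - cancel a deferred ACK/NAK response
 *
 * Clears the deferred ACK count and, if the QP is queued on the
 * context's response wait list, removes it and drops the reference
 * taken when it was queued.
 */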
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) static inline void rc_cancel_ack(struct rvt_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) qp->r_adefered = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) if (list_empty(&qp->rspwait))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) list_del_init(&qp->rspwait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) qp->r_flags &= ~RVT_R_RSP_NAK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) rvt_put_qp(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) * rc_rcv_error - process an incoming duplicate or error RC packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) * @ohdr: the other headers for this packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) * @data: the packet data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) * @qp: the QP for this packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) * @opcode: the opcode for this packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) * @psn: the packet sequence number for this packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) * @diff: the difference between the PSN and the expected PSN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) * This is called from hfi1_rc_rcv() to process an unexpected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) * incoming RC packet for the given QP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) * Called at interrupt level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) * Return 1 if no more processing is needed; otherwise return 0 to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) * schedule a response to be sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) struct rvt_qp *qp, u32 opcode, u32 psn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) int diff, struct hfi1_ctxtdata *rcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) struct hfi1_ibport *ibp = rcd_to_iport(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) struct rvt_ack_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) u8 prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) u8 mra; /* most recent ACK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) bool old_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) trace_hfi1_rcv_error(qp, psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) if (diff > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) * Packet sequence error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) * A NAK will ACK earlier sends and RDMA writes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) * Don't queue the NAK if we already sent one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) if (!qp->r_nak_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) ibp->rvp.n_rc_seqnak++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) qp->r_nak_state = IB_NAK_PSN_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) /* Use the expected PSN. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) qp->r_ack_psn = qp->r_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) * Wait to send the sequence NAK until all packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) * in the receive queue have been processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) * Otherwise, we end up propagating congestion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) rc_defered_ack(rcd, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) * Handle a duplicate request. Don't re-execute SEND, RDMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) * write or atomic op. Don't NAK errors, just silently drop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) * the duplicate request. Note that r_sge, r_len, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) * r_rcv_len may be in use so don't modify them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) * We are supposed to ACK the earliest duplicate PSN but we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) * can coalesce an outstanding duplicate ACK. We have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) * send the earliest so that RDMA reads can be restarted at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) * the requester's expected PSN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) * First, find where this duplicate PSN falls within the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) * ACKs previously sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) * old_req is true if there is an older response that is scheduled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) * to be sent before sending this one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) e = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) old_req = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) ibp->rvp.n_rc_dupreq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) spin_lock_irqsave(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) e = find_prev_entry(qp, psn, &prev, &mra, &old_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) switch (opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) case OP(RDMA_READ_REQUEST): {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) struct ib_reth *reth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) * If we didn't find the RDMA read request in the ack queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) * we can ignore this request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) if (!e || e->opcode != OP(RDMA_READ_REQUEST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) goto unlock_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) /* RETH comes after BTH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) reth = &ohdr->u.rc.reth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) * Address range must be a subset of the original
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) * request and start on pmtu boundaries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) * We reuse the old ack_queue slot since the requester
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) * should not back up and request an earlier PSN for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) * same request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) */
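/*
 * Each response packet of the original read covered one pmtu, so a
 * duplicate request whose PSN is N past the original start resumes
 * the read at byte offset N * pmtu.
 */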
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) offset = delta_psn(psn, e->psn) * qp->pmtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) len = be32_to_cpu(reth->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) if (unlikely(offset + len != e->rdma_sge.sge_length))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) goto unlock_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) release_rdma_sge_mr(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) if (len != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) u32 rkey = be32_to_cpu(reth->rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) u64 vaddr = get_ib_reth_vaddr(reth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) int ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) IB_ACCESS_REMOTE_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) if (unlikely(!ok))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) goto unlock_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) e->rdma_sge.vaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) e->rdma_sge.length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) e->rdma_sge.sge_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) e->psn = psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) if (old_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) goto unlock_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) qp->s_acked_ack_queue = prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) qp->s_tail_ack_queue = prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) case OP(COMPARE_SWAP):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) case OP(FETCH_ADD): {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) * If we didn't find the atomic request in the ack queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) * or the send engine is already backed up to send an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) * earlier entry, we can ignore this request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) if (!e || e->opcode != (u8)opcode || old_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) goto unlock_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) qp->s_acked_ack_queue = prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) qp->s_tail_ack_queue = prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) * Ignore this operation if it doesn't request an ACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) * or an earlier RDMA read or atomic is going to be resent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) if (!(psn & IB_BTH_REQ_ACK) || old_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) goto unlock_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) * Resend the most recent ACK if this request is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) * after all the previous RDMA reads and atomics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) if (mra == qp->r_head_ack_queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) spin_unlock_irqrestore(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) qp->r_nak_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) qp->r_ack_psn = qp->r_psn - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) goto send_ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) * Resend the RDMA read or atomic op which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) * ACKs this duplicate request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) qp->s_acked_ack_queue = mra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) qp->s_tail_ack_queue = mra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) qp->s_ack_state = OP(ACKNOWLEDGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) qp->s_flags |= RVT_S_RESP_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) qp->r_nak_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) hfi1_schedule_send(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) unlock_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) spin_unlock_irqrestore(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) send_ack:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712)
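/*
 * log_cca_event - record a congestion control event
 *
 * Marks the SL in the per-port threshold event bitmap and appends an
 * entry (QPNs, SL, service type, remote LID, 1.024 usec timestamp) to
 * the port's circular congestion event log.
 */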
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) u32 lqpn, u32 rqpn, u8 svc_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) struct opa_hfi1_cong_log_event_internal *cc_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) if (sl >= OPA_MAX_SLS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) spin_lock_irqsave(&ppd->cc_log_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) ppd->threshold_event_counter++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) cc_event = &ppd->cc_events[ppd->cc_log_idx++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) ppd->cc_log_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) cc_event->lqpn = lqpn & RVT_QPN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) cc_event->rqpn = rqpn & RVT_QPN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) cc_event->sl = sl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) cc_event->svc_type = svc_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) cc_event->rlid = rlid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) /* keep timestamp in units of 1.024 usec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) cc_event->timestamp = ktime_get_ns() / 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)
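/*
 * process_becn - respond to a Backward Explicit Congestion Notification
 *
 * Increases the CCTI for the marked SL (clamped to ccti_limit), updates
 * the link inter-packet gap, and arms the per-SL CCA timer if it is not
 * already running.  Once the CCTI reaches the SL's (non-zero) trigger
 * threshold, the event is also logged via log_cca_event().
 */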
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) u32 rqpn, u8 svc_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) struct cca_timer *cca_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) u16 ccti, ccti_incr, ccti_timer, ccti_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) u8 trigger_threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) struct cc_state *cc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) if (sl >= OPA_MAX_SLS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) cc_state = get_cc_state(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) if (!cc_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) * 1) increase CCTI (for this SL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) * 2) select IPG (i.e., call set_link_ipg())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) * 3) start timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) ccti_limit = cc_state->cct.ccti_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) trigger_threshold =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) cc_state->cong_setting.entries[sl].trigger_threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) spin_lock_irqsave(&ppd->cca_timer_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) cca_timer = &ppd->cca_timer[sl];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) if (cca_timer->ccti < ccti_limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) if (cca_timer->ccti + ccti_incr <= ccti_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) cca_timer->ccti += ccti_incr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) cca_timer->ccti = ccti_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) set_link_ipg(ppd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) ccti = cca_timer->ccti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) if (!hrtimer_active(&cca_timer->hrtimer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) /* ccti_timer is in units of 1.024 usec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) unsigned long nsec = 1024 * ccti_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) HRTIMER_MODE_REL_PINNED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) * hfi1_rc_rcv - process an incoming RC packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) * @packet: data packet information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) * This is called from qp_rcv() to process an incoming RC packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) * for the given QP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) * May be called at interrupt level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) void hfi1_rc_rcv(struct hfi1_packet *packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) struct hfi1_ctxtdata *rcd = packet->rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) void *data = packet->payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) u32 tlen = packet->tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) struct rvt_qp *qp = packet->qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) struct hfi1_qp_priv *qpriv = qp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) struct hfi1_ibport *ibp = rcd_to_iport(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) struct ib_other_headers *ohdr = packet->ohdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) u32 opcode = packet->opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) u32 hdrsize = packet->hlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) u32 psn = ib_bth_get_psn(packet->ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) u32 pad = packet->pad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) struct ib_wc wc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) u32 pmtu = qp->pmtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) int diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) struct ib_reth *reth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) bool copy_last = false, fecn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) u32 rkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) lockdep_assert_held(&qp->r_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) if (hfi1_ruc_check_hdr(ibp, packet))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) fecn = process_ecn(qp, packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) opfn_trigger_conn_request(qp, be32_to_cpu(ohdr->bth[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) * Process responses (ACKs) before anything else. Note that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) * packet sequence number will be for something in the send work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) * queue rather than the expected receive packet sequence number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) * In other words, this QP is the requester.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) rc_rcv_resp(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) /* Compute 24 bits worth of difference. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) diff = delta_psn(psn, qp->r_psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) if (unlikely(diff)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) goto send_ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) /* Check for opcode sequence errors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) switch (qp->r_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) case OP(SEND_FIRST):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) case OP(SEND_MIDDLE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) if (opcode == OP(SEND_MIDDLE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) opcode == OP(SEND_LAST) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) opcode == OP(SEND_LAST_WITH_INVALIDATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) goto nack_inv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) case OP(RDMA_WRITE_FIRST):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) case OP(RDMA_WRITE_MIDDLE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) if (opcode == OP(RDMA_WRITE_MIDDLE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) opcode == OP(RDMA_WRITE_LAST) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) goto nack_inv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) if (opcode == OP(SEND_MIDDLE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) opcode == OP(SEND_LAST) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) opcode == OP(RDMA_WRITE_MIDDLE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) opcode == OP(RDMA_WRITE_LAST) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) goto nack_inv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) * Note that it is up to the requester to not send a new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) * RDMA read or atomic operation before receiving an ACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) * for the previous operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) rvt_comm_est(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) /* OK, process the packet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) switch (opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) case OP(SEND_FIRST):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) ret = rvt_get_rwqe(qp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) goto nack_op_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) goto rnr_nak;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) qp->r_rcv_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) case OP(SEND_MIDDLE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) case OP(RDMA_WRITE_MIDDLE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) send_middle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) /* Check for an invalid length against the PMTU and the posted rwqe len. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) * There will be no padding for a 9B packet, but 16B packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) * will come in with some padding since we always add the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) * CRC and LT bytes, which need to be flit aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) goto nack_inv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) qp->r_rcv_len += pmtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) if (unlikely(qp->r_rcv_len > qp->r_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) goto nack_inv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) /* consume RWQE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) ret = rvt_get_rwqe(qp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) goto nack_op_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) goto rnr_nak;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) goto send_last_imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) case OP(SEND_ONLY):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) case OP(SEND_ONLY_WITH_IMMEDIATE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) case OP(SEND_ONLY_WITH_INVALIDATE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) ret = rvt_get_rwqe(qp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) goto nack_op_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) goto rnr_nak;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) qp->r_rcv_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) if (opcode == OP(SEND_ONLY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) goto no_immediate_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) goto send_last_inv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) fallthrough; /* for SEND_ONLY_WITH_IMMEDIATE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) case OP(SEND_LAST_WITH_IMMEDIATE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) send_last_imm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) wc.ex.imm_data = ohdr->u.imm_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) wc.wc_flags = IB_WC_WITH_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) goto send_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) case OP(SEND_LAST_WITH_INVALIDATE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) send_last_inv:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) rkey = be32_to_cpu(ohdr->u.ieth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) if (rvt_invalidate_rkey(qp, rkey))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) goto no_immediate_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) wc.ex.invalidate_rkey = rkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) wc.wc_flags = IB_WC_WITH_INVALIDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) goto send_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) case OP(RDMA_WRITE_LAST):
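/*
 * For user QPs, ask rvt_copy_sge() to copy the tail of the payload
 * last, so an application polling the final bytes of an RDMA write
 * should not observe them before the preceding data has landed.
 */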
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) copy_last = rvt_is_user_qp(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) case OP(SEND_LAST):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) no_immediate_data:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) wc.wc_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) wc.ex.imm_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) send_last:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) /* Check for invalid length. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) /* LAST len should be >= 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) if (unlikely(tlen < (hdrsize + extra_bytes)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) goto nack_inv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) /* Don't count the CRC (and the padding and LT byte for 16B). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) tlen -= (hdrsize + extra_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) wc.byte_len = tlen + qp->r_rcv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) if (unlikely(wc.byte_len > qp->r_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) goto nack_inv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, copy_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) rvt_put_ss(&qp->r_sge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) qp->r_msn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) wc.wr_id = qp->r_wr_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) wc.status = IB_WC_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) wc.opcode = IB_WC_RECV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) wc.qp = &qp->ibqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) wc.src_qp = qp->remote_qpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) * It seems that IB mandates the presence of an SL in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) * work completion only for the UD transport (see section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) * 11.4.2 of IBTA Vol. 1).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) * However, the way the SL is chosen below is consistent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) * with the way that IB/qib works and tries to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) * introducing incompatibilities.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) * See also OPA Vol. 1, section 9.7.6, and table 9-17.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) /* zero fields that are N/A */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) wc.vendor_err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) wc.pkey_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) wc.dlid_path_bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) wc.port_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) /* Signal completion event if the solicited bit is set. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) case OP(RDMA_WRITE_ONLY):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) copy_last = rvt_is_user_qp(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) case OP(RDMA_WRITE_FIRST):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) goto nack_inv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) /* consume RWQE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) reth = &ohdr->u.rc.reth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) qp->r_len = be32_to_cpu(reth->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) qp->r_rcv_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) qp->r_sge.sg_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) if (qp->r_len != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) u32 rkey = be32_to_cpu(reth->rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) u64 vaddr = get_ib_reth_vaddr(reth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) int ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) /* Check rkey & NAK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) rkey, IB_ACCESS_REMOTE_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) if (unlikely(!ok))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) goto nack_acc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) qp->r_sge.num_sge = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) qp->r_sge.num_sge = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) qp->r_sge.sge.mr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) qp->r_sge.sge.vaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) qp->r_sge.sge.length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) qp->r_sge.sge.sge_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) if (opcode == OP(RDMA_WRITE_FIRST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) goto send_middle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) else if (opcode == OP(RDMA_WRITE_ONLY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) goto no_immediate_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) ret = rvt_get_rwqe(qp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) goto nack_op_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) /* peer will send again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) rvt_put_ss(&qp->r_sge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) goto rnr_nak;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) wc.ex.imm_data = ohdr->u.rc.imm_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) wc.wc_flags = IB_WC_WITH_IMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) goto send_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) case OP(RDMA_READ_REQUEST): {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) struct rvt_ack_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) u8 next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) goto nack_inv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) next = qp->r_head_ack_queue + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) /* s_ack_queue is size rvt_size_atomic()+1 so use > not >= */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) spin_lock_irqsave(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) if (unlikely(next == qp->s_acked_ack_queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) if (!qp->s_ack_queue[next].sent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) goto nack_inv_unlck;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) update_ack_queue(qp, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) e = &qp->s_ack_queue[qp->r_head_ack_queue];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) release_rdma_sge_mr(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) reth = &ohdr->u.rc.reth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) len = be32_to_cpu(reth->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) if (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) u32 rkey = be32_to_cpu(reth->rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) u64 vaddr = get_ib_reth_vaddr(reth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) int ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) /* Check rkey & NAK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) rkey, IB_ACCESS_REMOTE_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) if (unlikely(!ok))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) goto nack_acc_unlck;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) * Update the next expected PSN. We add 1 later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) * below, so only add the remainder here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) */
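/*
 * e.g. a read of 3 * pmtu bytes spans three response packets with
 * PSNs psn .. psn + 2: rvt_div_mtu(qp, len - 1) adds 2 here and the
 * qp->r_psn++ below supplies the final + 1.
 */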
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) qp->r_psn += rvt_div_mtu(qp, len - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) e->rdma_sge.mr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) e->rdma_sge.vaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) e->rdma_sge.length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) e->rdma_sge.sge_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) e->opcode = opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) e->sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) e->psn = psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) e->lpsn = qp->r_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) * We need to increment the MSN here instead of when we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) * finish sending the result since a duplicate request would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) * increment it more than once.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) qp->r_msn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) qp->r_psn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) qp->r_state = opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) qp->r_nak_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) qp->r_head_ack_queue = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) qpriv->r_tid_alloc = qp->r_head_ack_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) /* Schedule the send engine. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) qp->s_flags |= RVT_S_RESP_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) if (fecn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) qp->s_flags |= RVT_S_ECN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) hfi1_schedule_send(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) spin_unlock_irqrestore(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) case OP(COMPARE_SWAP):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) case OP(FETCH_ADD): {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) struct ib_atomic_eth *ateth = &ohdr->u.atomic_eth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) u64 vaddr = get_ib_ateth_vaddr(ateth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) bool opfn = opcode == OP(COMPARE_SWAP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) vaddr == HFI1_VERBS_E_ATOMIC_VADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) struct rvt_ack_entry *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) atomic64_t *maddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) u64 sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) u32 rkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) u8 next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135)
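/*
 * A COMPARE_SWAP to the reserved HFI1_VERBS_E_ATOMIC_VADDR carries an
 * OPFN exchange and is allowed even without IB_ACCESS_REMOTE_ATOMIC.
 */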
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) !opfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) goto nack_inv;
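/* Claim the next ACK queue slot, wrapping around the end of the queue. */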
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) next = qp->r_head_ack_queue + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) spin_lock_irqsave(&qp->s_lock, flags);
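/*
 * If the new head would overwrite the oldest outstanding entry,
 * NAK unless that entry has already been sent.
 */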
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) if (unlikely(next == qp->s_acked_ack_queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) if (!qp->s_ack_queue[next].sent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) goto nack_inv_unlck;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) update_ack_queue(qp, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) e = &qp->s_ack_queue[qp->r_head_ack_queue];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) release_rdma_sge_mr(e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) /* Process OPFN special virtual address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) if (opfn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) opfn_conn_response(qp, e, ateth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) goto ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) }
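/* Real atomics must target a naturally aligned 64-bit address. */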
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) if (unlikely(vaddr & (sizeof(u64) - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) goto nack_inv_unlck;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) rkey = be32_to_cpu(ateth->rkey);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) /* Check rkey & NAK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) vaddr, rkey,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) IB_ACCESS_REMOTE_ATOMIC)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) goto nack_acc_unlck;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) /* Perform atomic OP and save result. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) sdata = get_ib_ateth_swap(ateth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) e->atomic_data = (opcode == OP(FETCH_ADD)) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) (u64)atomic64_add_return(sdata, maddr) - sdata :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) get_ib_ateth_compare(ateth),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) sdata);
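/*
 * e->atomic_data now holds the value that was at vaddr before the
 * operation; it is returned to the requester in the atomic ACK.
 */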
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) rvt_put_mr(qp->r_sge.sge.mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) qp->r_sge.num_sge = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) ack:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) e->opcode = opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) e->sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) e->psn = psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) e->lpsn = psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) qp->r_msn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) qp->r_psn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) qp->r_state = opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) qp->r_nak_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) qp->r_head_ack_queue = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) qpriv->r_tid_alloc = qp->r_head_ack_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) /* Schedule the send engine. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) qp->s_flags |= RVT_S_RESP_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) if (fecn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) qp->s_flags |= RVT_S_ECN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) hfi1_schedule_send(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) spin_unlock_irqrestore(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) /* NAK unknown opcodes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) goto nack_inv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) }
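/*
 * Common completion path for SEND and RDMA WRITE requests: advance
 * the receive state and decide whether to ACK now or coalesce.
 */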
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) qp->r_psn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) qp->r_state = opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) qp->r_ack_psn = psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) qp->r_nak_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) /* Send an ACK if requested or required. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) if (psn & IB_BTH_REQ_ACK || fecn) {
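/*
 * Send the ACK immediately when forced (e.g. FECN or too many
 * deferred ACKs); otherwise defer it so ACKs can be coalesced.
 */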
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) if (packet->numpkt == 0 || fecn ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) qp->r_adefered >= HFI1_PSN_CREDIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) rc_cancel_ack(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) goto send_ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) qp->r_adefered++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) rc_defered_ack(rcd, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) rnr_nak:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) qp->r_ack_psn = qp->r_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) /* Queue RNR NAK for later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) rc_defered_ack(rcd, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) nack_op_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) qp->r_ack_psn = qp->r_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) /* Queue NAK for later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) rc_defered_ack(rcd, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) nack_inv_unlck:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) spin_unlock_irqrestore(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) nack_inv:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) qp->r_nak_state = IB_NAK_INVALID_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) qp->r_ack_psn = qp->r_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) /* Queue NAK for later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) rc_defered_ack(rcd, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) nack_acc_unlck:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) spin_unlock_irqrestore(&qp->s_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) nack_acc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) qp->r_ack_psn = qp->r_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) send_ack:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) hfi1_send_rc_ack(packet, fecn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249)
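/*
 * Handle a packet whose header failed validation: for request opcodes,
 * if the PSN is not behind the expected PSN and no NAK is pending,
 * record a PSN-error NAK and defer it until the receive queue drains.
 */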
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) void hfi1_rc_hdrerr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) struct hfi1_ctxtdata *rcd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) struct hfi1_packet *packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) struct rvt_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) struct hfi1_ibport *ibp = rcd_to_iport(rcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) int diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) u32 opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) u32 psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) if (hfi1_ruc_check_hdr(ibp, packet))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) psn = ib_bth_get_psn(packet->ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) opcode = ib_bth_get_opcode(packet->ohdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) /* Only deal with RDMA Writes for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) diff = delta_psn(psn, qp->r_psn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) if (!qp->r_nak_state && diff >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) ibp->rvp.n_rc_seqnak++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) qp->r_nak_state = IB_NAK_PSN_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) /* Use the expected PSN. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) qp->r_ack_psn = qp->r_psn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) * Wait to send the sequence NAK until all packets in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) * the receive queue have been processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) * Otherwise, we end up propagating congestion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) rc_defered_ack(rcd, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) } /* Out of sequence NAK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) } /* QP Request NAKs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) }