^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright(c) 2018 Intel Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #ifndef HFI1_RC_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #define HFI1_RC_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) /* cut down ridiculously long IB macro names */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #define OP(x) IB_OPCODE_RC_##x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) static inline void update_ack_queue(struct rvt_qp *qp, unsigned int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) unsigned int next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) next = n + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) qp->s_tail_ack_queue = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) qp->s_acked_ack_queue = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) qp->s_ack_state = OP(ACKNOWLEDGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) struct rvt_qp *qp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) if (list_empty(&qp->rspwait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) qp->r_flags |= RVT_R_RSP_NAK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) rvt_get_qp(qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) static inline u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) u32 psn, u32 pmtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) len = delta_psn(psn, wqe->psn) * pmtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) return rvt_restart_sge(ss, wqe, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) static inline void release_rdma_sge_mr(struct rvt_ack_entry *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) if (e->rdma_sge.mr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) rvt_put_mr(e->rdma_sge.mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) e->rdma_sge.mr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
/*
 * RC protocol helpers implemented in the corresponding .c file; shared
 * here so sibling files (e.g. TID RDMA code) can call them.
 * NOTE(review): behavior described from the declarations only — confirm
 * details against the definitions.
 */

/* Locate the ack-queue entry preceding @psn; presumably fills @prev /
 * @prev_ack with indices and @scheduled with whether a response is
 * already scheduled — verify against the definition. */
struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
				      u8 *prev_ack, bool *scheduled);
/* Process an incoming ACK (AETH @aeth, PSN @psn) for @qp on context @rcd. */
int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, u64 val,
	      struct hfi1_ctxtdata *rcd);
/* Complete @wqe on @qp; returns the next WQE to work on. */
struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, struct rvt_swqe *wqe,
				  struct hfi1_ibport *ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #endif /* HFI1_RC_H */