// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2018 Intel Corporation.
 *
 */
#include "hfi.h"
#include "trace.h"
#include "qp.h"
#include "opfn.h"

#define IB_BTHE_E BIT(IB_BTHE_E_SHIFT)

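/*
 * Capability codes are 1-based (STL_VERBS_EXTD_NONE == 0 means "no request
 * in progress"), so code N is tracked as bit N - 1 in the requested and
 * completed bitmasks.
 */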
#define OPFN_CODE(code) BIT((code) - 1)
#define OPFN_MASK(code) OPFN_CODE(STL_VERBS_EXTD_##code)

struct hfi1_opfn_type {
	bool (*request)(struct rvt_qp *qp, u64 *data);
	bool (*response)(struct rvt_qp *qp, u64 *data);
	bool (*reply)(struct rvt_qp *qp, u64 data);
	void (*error)(struct rvt_qp *qp);
};

static struct hfi1_opfn_type hfi1_opfn_handlers[STL_VERBS_EXTD_MAX] = {
	[STL_VERBS_EXTD_TID_RDMA] = {
		.request = tid_rdma_conn_req,
		.response = tid_rdma_conn_resp,
		.reply = tid_rdma_conn_reply,
		.error = tid_rdma_conn_error,
	},
};

static struct workqueue_struct *opfn_wq;

static void opfn_schedule_conn_request(struct rvt_qp *qp);

static bool hfi1_opfn_extended(u32 bth1)
{
	return !!(bth1 & IB_BTHE_E);
}

static void opfn_conn_request(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_atomic_wr wr;
	u16 mask, capcode;
	struct hfi1_opfn_type *extd;
	u64 data;
	unsigned long flags;
	int ret = 0;

	trace_hfi1_opfn_state_conn_request(qp);
	spin_lock_irqsave(&priv->opfn.lock, flags);
	/*
	 * Exit if the extended bit is not set, or if nothing is requested, or
	 * if we have completed all requests, or if a previous request is in
	 * progress
	 */
	if (!priv->opfn.extended || !priv->opfn.requested ||
	    priv->opfn.requested == priv->opfn.completed || priv->opfn.curr)
		goto done;

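	/*
	 * Pick the lowest-numbered capability that is requested but not yet
	 * completed: isolating the lowest set bit of the pending mask and
	 * taking ilog2() + 1 converts that bit back into a 1-based code.
	 * For example (hypothetical values), requested = 0b0101 and
	 * completed = 0b0001 give mask = 0b0100, so capcode = 2 + 1 = 3.
	 */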
	mask = priv->opfn.requested & ~priv->opfn.completed;
	capcode = ilog2(mask & ~(mask - 1)) + 1;
	if (capcode >= STL_VERBS_EXTD_MAX) {
		priv->opfn.completed |= OPFN_CODE(capcode);
		goto done;
	}

	extd = &hfi1_opfn_handlers[capcode];
	if (!extd || !extd->request || !extd->request(qp, &data)) {
		/*
		 * Either there is no handler for this capability or the
		 * request packet could not be generated. Either way, mark it
		 * as done so we don't keep attempting to complete it.
		 */
		priv->opfn.completed |= OPFN_CODE(capcode);
		goto done;
	}

	trace_hfi1_opfn_data_conn_request(qp, capcode, data);
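	/* The low nibble of the 64-bit payload carries the capability code */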
	data = (data & ~0xf) | capcode;

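	/*
	 * The request is posted as an atomic work request with the IB_WR_OPFN
	 * opcode, targeting the reserved HFI1_VERBS_E_ATOMIC_VADDR; the
	 * 64-bit negotiation payload travels in compare_add.
	 */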
	memset(&wr, 0, sizeof(wr));
	wr.wr.opcode = IB_WR_OPFN;
	wr.remote_addr = HFI1_VERBS_E_ATOMIC_VADDR;
	wr.compare_add = data;

	priv->opfn.curr = capcode;	/* A new request is now in progress */
	/* Drop opfn.lock before calling ib_post_send() */
	spin_unlock_irqrestore(&priv->opfn.lock, flags);

	ret = ib_post_send(&qp->ibqp, &wr.wr, NULL);
	if (ret)
		goto err;
	trace_hfi1_opfn_state_conn_request(qp);
	return;
err:
	trace_hfi1_msg_opfn_conn_request(qp, "ib_post_send failed: ret = ",
					 (u64)ret);
	spin_lock_irqsave(&priv->opfn.lock, flags);
	/*
	 * In case of an unexpected error return from ib_post_send(), clear
	 * opfn.curr and reschedule to try again
	 */
	priv->opfn.curr = STL_VERBS_EXTD_NONE;
	opfn_schedule_conn_request(qp);
done:
	spin_unlock_irqrestore(&priv->opfn.lock, flags);
}

void opfn_send_conn_request(struct work_struct *work)
{
	struct hfi1_opfn_data *od;
	struct hfi1_qp_priv *qpriv;

	od = container_of(work, struct hfi1_opfn_data, opfn_work);
	qpriv = container_of(od, struct hfi1_qp_priv, opfn);

	opfn_conn_request(qpriv->owner);
}

/*
 * When the QP s_lock is held in the caller, the OPFN request must be
 * scheduled to a different workqueue to avoid double locking the QP s_lock
 * in the call to ib_post_send() in opfn_conn_request()
 */
static void opfn_schedule_conn_request(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	trace_hfi1_opfn_state_sched_conn_request(qp);
	queue_work(opfn_wq, &priv->opfn.opfn_work);
}

void opfn_conn_response(struct rvt_qp *qp, struct rvt_ack_entry *e,
			struct ib_atomic_eth *ateth)
{
	struct hfi1_qp_priv *priv = qp->priv;
	u64 data = be64_to_cpu(ateth->compare_data);
	struct hfi1_opfn_type *extd;
	u8 capcode;
	unsigned long flags;

	trace_hfi1_opfn_state_conn_response(qp);
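	/* The requester encodes the capability code in the low nibble */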
	capcode = data & 0xf;
	trace_hfi1_opfn_data_conn_response(qp, capcode, data);
	if (!capcode || capcode >= STL_VERBS_EXTD_MAX)
		return;

	extd = &hfi1_opfn_handlers[capcode];

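	/*
	 * No handler for this capability: echo back just the capcode, with
	 * the parameter bits zeroed, so the requester can tell that nothing
	 * was negotiated
	 */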
	if (!extd || !extd->response) {
		e->atomic_data = capcode;
		return;
	}

	spin_lock_irqsave(&priv->opfn.lock, flags);
	if (priv->opfn.completed & OPFN_CODE(capcode)) {
		/*
		 * We are receiving a request for a feature that has already
		 * been negotiated. This may mean that the other side has been
		 * reset.
		 */
		priv->opfn.completed &= ~OPFN_CODE(capcode);
		if (extd->error)
			extd->error(qp);
	}

	if (extd->response(qp, &data))
		priv->opfn.completed |= OPFN_CODE(capcode);
	e->atomic_data = (data & ~0xf) | capcode;
	trace_hfi1_opfn_state_conn_response(qp);
	spin_unlock_irqrestore(&priv->opfn.lock, flags);
}

void opfn_conn_reply(struct rvt_qp *qp, u64 data)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_opfn_type *extd;
	u8 capcode;
	unsigned long flags;

	trace_hfi1_opfn_state_conn_reply(qp);
	capcode = data & 0xf;
	trace_hfi1_opfn_data_conn_reply(qp, capcode, data);
	if (!capcode || capcode >= STL_VERBS_EXTD_MAX)
		return;

	spin_lock_irqsave(&priv->opfn.lock, flags);
	/*
	 * Either there is no previous request or the reply is not for the
	 * current request
	 */
	if (!priv->opfn.curr || capcode != priv->opfn.curr)
		goto done;

	extd = &hfi1_opfn_handlers[capcode];

	if (!extd || !extd->reply)
		goto clear;

	if (extd->reply(qp, data))
		priv->opfn.completed |= OPFN_CODE(capcode);
clear:
	/*
	 * Clear opfn.curr to indicate that the previous request is no longer
	 * in progress
	 */
	priv->opfn.curr = STL_VERBS_EXTD_NONE;
	trace_hfi1_opfn_state_conn_reply(qp);
done:
	spin_unlock_irqrestore(&priv->opfn.lock, flags);
}

void opfn_conn_error(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_opfn_type *extd = NULL;
	unsigned long flags;
	u16 capcode;

	trace_hfi1_opfn_state_conn_error(qp);
	trace_hfi1_msg_opfn_conn_error(qp, "error. qp state ", (u64)qp->state);
	/*
	 * The QP has gone into the Error state. We have to invalidate all
	 * negotiated features, including the one in progress (if any). The RC
	 * QP handling will clean the WQE for the connection request.
	 */
	spin_lock_irqsave(&priv->opfn.lock, flags);
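	/*
	 * Walk the completed mask one bit at a time; note that capcode below
	 * holds the isolated feature bit itself, not the 1-based capability
	 * code, so it must be cleared directly rather than via OPFN_CODE()
	 */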
	while (priv->opfn.completed) {
		capcode = priv->opfn.completed & ~(priv->opfn.completed - 1);
		extd = &hfi1_opfn_handlers[ilog2(capcode) + 1];
		if (extd->error)
			extd->error(qp);
		priv->opfn.completed &= ~capcode;
	}
	priv->opfn.extended = 0;
	priv->opfn.requested = 0;
	priv->opfn.curr = STL_VERBS_EXTD_NONE;
	spin_unlock_irqrestore(&priv->opfn.lock, flags);
}

void opfn_qp_init(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_qp_priv *priv = qp->priv;
	unsigned long flags;

	if (attr_mask & IB_QP_RETRY_CNT)
		priv->s_retry = attr->retry_cnt;

	spin_lock_irqsave(&priv->opfn.lock, flags);
	if (ibqp->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
		struct tid_rdma_params *local = &priv->tid_rdma.local;

		if (attr_mask & IB_QP_TIMEOUT)
			priv->tid_retry_timeout_jiffies = qp->timeout_jiffies;
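		/* TID RDMA is only negotiated at 4096- or 8192-byte MTUs */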
		if (qp->pmtu == enum_to_mtu(OPA_MTU_4096) ||
		    qp->pmtu == enum_to_mtu(OPA_MTU_8192)) {
			tid_rdma_opfn_init(qp, local);
			/*
			 * We only want to set the OPFN requested bit when the
			 * QP transitions to RTS.
			 */
			if (attr_mask & IB_QP_STATE &&
			    attr->qp_state == IB_QPS_RTS) {
				priv->opfn.requested |= OPFN_MASK(TID_RDMA);
				/*
				 * If the QP is transitioning to RTS and the
				 * opfn.completed for TID RDMA has already been
				 * set, the QP is being moved *back* into RTS.
				 * We can now renegotiate the TID RDMA
				 * parameters.
				 */
				if (priv->opfn.completed &
				    OPFN_MASK(TID_RDMA)) {
					priv->opfn.completed &=
						~OPFN_MASK(TID_RDMA);
					/*
					 * Since the opfn.completed bit was
					 * already set, it is safe to assume
					 * that the opfn.extended is also set.
					 */
					opfn_schedule_conn_request(qp);
				}
			}
		} else {
			memset(local, 0, sizeof(*local));
		}
	}
	spin_unlock_irqrestore(&priv->opfn.lock, flags);
}

void opfn_trigger_conn_request(struct rvt_qp *qp, u32 bth1)
{
	struct hfi1_qp_priv *priv = qp->priv;

	if (!priv->opfn.extended && hfi1_opfn_extended(bth1) &&
	    HFI1_CAP_IS_KSET(OPFN)) {
		priv->opfn.extended = 1;
		if (qp->state == IB_QPS_RTS)
			opfn_conn_request(qp);
	}
}

int opfn_init(void)
{
	opfn_wq = alloc_workqueue("hfi_opfn",
				  WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
				  WQ_MEM_RECLAIM,
				  HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES);
	if (!opfn_wq)
		return -ENOMEM;

	return 0;
}

void opfn_exit(void)
{
	if (opfn_wq) {
		destroy_workqueue(opfn_wq);
		opfn_wq = NULL;
	}
}