// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Connection Data Control (CDC)
 * handles flow control
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
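
/* Overview: a CDC message carries the sender's producer and consumer
 * cursors plus control flags. On SMC-R it is posted as a work request
 * via smc_wr_tx_send(); on SMC-D it is written directly into the peer's
 * DMB via smcd_tx_ism_write(). On receive, the cursor deltas replenish
 * sndbuf_space, peer_rmbe_space and bytes_to_rcv, and sleeping readers
 * or writers are woken.
 */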

#include <linux/spinlock.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"

/********************************** send *************************************/

/* handler for send/transmission completion of a CDC msg */
static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
                               struct smc_link *link,
                               enum ib_wc_status wc_status)
{
        struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd;
        struct smc_connection *conn = cdcpend->conn;
        struct smc_sock *smc;
        int diff;

        smc = container_of(conn, struct smc_sock, conn);
        bh_lock_sock(&smc->sk);
        if (!wc_status) {
                diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len,
                                     &cdcpend->conn->tx_curs_fin,
                                     &cdcpend->cursor);
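                /* Wrap-around example (illustrative values only): with
                 * sndbuf_desc->len == 16384, tx_curs_fin.count == 16000
                 * and cdcpend->cursor.count == 1000 one wrap later,
                 * smc_curs_diff() yields (16384 - 16000) + 1000 = 1384
                 * bytes confirmed by this completed send.
                 */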
                /* sndbuf_space is decreased in smc_sendmsg */
                smp_mb__before_atomic();
                atomic_add(diff, &cdcpend->conn->sndbuf_space);
                /* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
                smp_mb__after_atomic();
                smc_curs_copy(&conn->tx_curs_fin, &cdcpend->cursor, conn);
                smc_curs_copy(&conn->local_tx_ctrl_fin, &cdcpend->p_cursor,
                              conn);
                conn->tx_cdc_seq_fin = cdcpend->ctrl_seq;
        }

        if (atomic_dec_and_test(&conn->cdc_pend_tx_wr) &&
            unlikely(wq_has_sleeper(&conn->cdc_pend_tx_wq)))
                wake_up(&conn->cdc_pend_tx_wq);
        WARN_ON(atomic_read(&conn->cdc_pend_tx_wr) < 0);

        smc_tx_sndbuf_nonfull(smc);
        bh_unlock_sock(&smc->sk);
}

int smc_cdc_get_free_slot(struct smc_connection *conn,
                          struct smc_link *link,
                          struct smc_wr_buf **wr_buf,
                          struct smc_rdma_wr **wr_rdma_buf,
                          struct smc_cdc_tx_pend **pend)
{
        int rc;

        rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
                                     wr_rdma_buf,
                                     (struct smc_wr_tx_pend_priv **)pend);
        if (conn->killed) {
                /* abnormal termination */
                if (!rc)
                        smc_wr_tx_put_slot(link,
                                           (struct smc_wr_tx_pend_priv *)pend);
                rc = -EPIPE;
        }
        return rc;
}

static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
                                            struct smc_cdc_tx_pend *pend)
{
        BUILD_BUG_ON_MSG(
                sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE,
                "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)");
        BUILD_BUG_ON_MSG(
                offsetofend(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE,
                "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
        BUILD_BUG_ON_MSG(
                sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
                "must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_cdc_tx_pend)");
        pend->conn = conn;
        pend->cursor = conn->tx_curs_sent;
        pend->p_cursor = conn->local_tx_ctrl.prod;
        pend->ctrl_seq = conn->tx_cdc_seq;
}

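/* Send a CDC message on the connection's current link. The callers in
 * this file hold conn->send_lock across this call (see
 * smcr_cdc_get_slot_and_msg_send()), so tx_cdc_seq is updated without
 * racing against other senders.
 */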
int smc_cdc_msg_send(struct smc_connection *conn,
                     struct smc_wr_buf *wr_buf,
                     struct smc_cdc_tx_pend *pend)
{
        struct smc_link *link = conn->lnk;
        union smc_host_cursor cfed;
        int rc;

        smc_cdc_add_pending_send(conn, pend);

        conn->tx_cdc_seq++;
        conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
        smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed);

        atomic_inc(&conn->cdc_pend_tx_wr);
        smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */

        rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
        if (!rc) {
                smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
                conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
        } else {
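                /* posting failed: roll back the sequence number and the
                 * pending work request count taken above
                 */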
                conn->tx_cdc_seq--;
                conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
                atomic_dec(&conn->cdc_pend_tx_wr);
        }

        return rc;
}

/* send a validation msg indicating the move of a conn to another QP link */
int smcr_cdc_msg_send_validation(struct smc_connection *conn,
                                 struct smc_cdc_tx_pend *pend,
                                 struct smc_wr_buf *wr_buf)
{
        struct smc_host_cdc_msg *local = &conn->local_tx_ctrl;
        struct smc_link *link = conn->lnk;
        struct smc_cdc_msg *peer;
        int rc;

        peer = (struct smc_cdc_msg *)wr_buf;
        peer->common.type = local->common.type;
        peer->len = local->len;
        peer->seqno = htons(conn->tx_cdc_seq_fin); /* seqno last compl. tx */
        peer->token = htonl(local->token);
        peer->prod_flags.failover_validation = 1;
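        /* the receiver routes messages with failover_validation set to
         * smc_cdc_msg_validate() instead of the normal receive path
         * (see smc_cdc_rx_handler())
         */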

        /* We need to set pend->conn here so that smc_cdc_tx_handler() can
         * process the completion properly
         */
        smc_cdc_add_pending_send(conn, pend);

        atomic_inc(&conn->cdc_pend_tx_wr);
        smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */

        rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
        if (unlikely(rc))
                atomic_dec(&conn->cdc_pend_tx_wr);

        return rc;
}

static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
{
        struct smc_cdc_tx_pend *pend;
        struct smc_wr_buf *wr_buf;
        struct smc_link *link;
        bool again = false;
        int rc;

again:
        link = conn->lnk;
        if (!smc_wr_tx_link_hold(link))
                return -ENOLINK;
        rc = smc_cdc_get_free_slot(conn, link, &wr_buf, NULL, &pend);
        if (rc)
                goto put_out;

        spin_lock_bh(&conn->send_lock);
        if (link != conn->lnk) {
                /* link of connection changed, try again one time */
                spin_unlock_bh(&conn->send_lock);
                smc_wr_tx_put_slot(link,
                                   (struct smc_wr_tx_pend_priv *)pend);
                smc_wr_tx_link_put(link);
                if (again)
                        return -ENOLINK;
                again = true;
                goto again;
        }
        rc = smc_cdc_msg_send(conn, wr_buf, pend);
        spin_unlock_bh(&conn->send_lock);
put_out:
        smc_wr_tx_link_put(link);
        return rc;
}

int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
{
        int rc;

        if (!conn->lgr || (conn->lgr->is_smcd && conn->lgr->peer_shutdown))
                return -EPIPE;

        if (conn->lgr->is_smcd) {
                spin_lock_bh(&conn->send_lock);
                rc = smcd_cdc_msg_send(conn);
                spin_unlock_bh(&conn->send_lock);
        } else {
                rc = smcr_cdc_get_slot_and_msg_send(conn);
        }

        return rc;
}

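/* Wait until all pending CDC sends of this connection have completed;
 * smc_cdc_tx_handler() wakes this queue when cdc_pend_tx_wr drops to zero.
 */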
void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn)
{
        wait_event(conn->cdc_pend_tx_wq, !atomic_read(&conn->cdc_pend_tx_wr));
}

/* Send an SMC-D CDC header.
 * This increments the free space available in our send buffer.
 * Also update the confirmed receive buffer with what was sent to the peer.
 */
int smcd_cdc_msg_send(struct smc_connection *conn)
{
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
        union smc_host_cursor curs;
        struct smcd_cdc_msg cdc;
        int rc, diff;

        memset(&cdc, 0, sizeof(cdc));
        cdc.common.type = SMC_CDC_MSG_TYPE;
        curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs);
        cdc.prod.wrap = curs.wrap;
        cdc.prod.count = curs.count;
        curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs);
        cdc.cons.wrap = curs.wrap;
        cdc.cons.count = curs.count;
        cdc.cons.prod_flags = conn->local_tx_ctrl.prod_flags;
        cdc.cons.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
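        /* SMC-D: the CDC message is written directly into the peer's DMB
         * via smcd_tx_ism_write() rather than posted as an RDMA work
         * request as on SMC-R (going by the arguments here: offset 0,
         * with the signal flag set)
         */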
        rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
        if (rc)
                return rc;
        smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
        conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
        /* Calculate transmitted data and increment free send buffer space */
        diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
                             &conn->tx_curs_sent);
        /* increased by confirmed number of bytes */
        smp_mb__before_atomic();
        atomic_add(diff, &conn->sndbuf_space);
        /* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
        smp_mb__after_atomic();
        smc_curs_copy(&conn->tx_curs_fin, &conn->tx_curs_sent, conn);

        smc_tx_sndbuf_nonfull(smc);
        return rc;
}

/********************************* receive ***********************************/

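/* Serial-number comparison in the style of RFC 1982: casting the u16
 * difference to s16 makes the test wrap-safe. Illustrative values:
 * smc_cdc_before(0xfffe, 0x0001) computes (s16)0xfffd == -3 < 0, so
 * 0xfffe counts as "before" 0x0001 across the wrap.
 */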
static inline bool smc_cdc_before(u16 seq1, u16 seq2)
{
        return (s16)(seq1 - seq2) < 0;
}

static void smc_cdc_handle_urg_data_arrival(struct smc_sock *smc,
                                            int *diff_prod)
{
        struct smc_connection *conn = &smc->conn;
        char *base;

        /* new data included urgent business */
        smc_curs_copy(&conn->urg_curs, &conn->local_rx_ctrl.prod, conn);
        conn->urg_state = SMC_URG_VALID;
        if (!sock_flag(&smc->sk, SOCK_URGINLINE))
                /* we'll skip the urgent byte, so don't account for it */
                (*diff_prod)--;
        base = (char *)conn->rmb_desc->cpu_addr + conn->rx_off;
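        /* the urgent byte is the last byte written before the producer
         * cursor; a count of 0 means the producer just wrapped, so the
         * byte sits at the very end of the RMB
         */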
        if (conn->urg_curs.count)
                conn->urg_rx_byte = *(base + conn->urg_curs.count - 1);
        else
                conn->urg_rx_byte = *(base + conn->rmb_desc->len - 1);
        sk_send_sigurg(&smc->sk);
}

static void smc_cdc_msg_validate(struct smc_sock *smc, struct smc_cdc_msg *cdc,
                                 struct smc_link *link)
{
        struct smc_connection *conn = &smc->conn;
        u16 recv_seq = ntohs(cdc->seqno);
        s16 diff;

        /* check that seqnum was seen before */
        diff = conn->local_rx_ctrl.seqno - recv_seq;
        if (diff < 0) { /* diff larger than 0x7fff */
                /* drop connection */
                conn->out_of_sync = 1; /* prevent any further receives */
                spin_lock_bh(&conn->send_lock);
                conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
                conn->lnk = link;
                spin_unlock_bh(&conn->send_lock);
                sock_hold(&smc->sk); /* sock_put in abort_work */
                if (!queue_work(smc_close_wq, &conn->abort_work))
                        sock_put(&smc->sk);
        }
}

static void smc_cdc_msg_recv_action(struct smc_sock *smc,
                                    struct smc_cdc_msg *cdc)
{
        union smc_host_cursor cons_old, prod_old;
        struct smc_connection *conn = &smc->conn;
        int diff_cons, diff_prod;

        smc_curs_copy(&prod_old, &conn->local_rx_ctrl.prod, conn);
        smc_curs_copy(&cons_old, &conn->local_rx_ctrl.cons, conn);
        smc_cdc_msg_to_host(&conn->local_rx_ctrl, cdc, conn);

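        /* diff_cons: bytes the peer consumed from its RMBE since the last
         * CDC message; that space is available again for our RDMA writes
         */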
        diff_cons = smc_curs_diff(conn->peer_rmbe_size, &cons_old,
                                  &conn->local_rx_ctrl.cons);
        if (diff_cons) {
                /* peer_rmbe_space is decreased during data transfer with RDMA
                 * write
                 */
                smp_mb__before_atomic();
                atomic_add(diff_cons, &conn->peer_rmbe_space);
                /* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
                smp_mb__after_atomic();
        }

        diff_prod = smc_curs_diff(conn->rmb_desc->len, &prod_old,
                                  &conn->local_rx_ctrl.prod);
        if (diff_prod) {
                if (conn->local_rx_ctrl.prod_flags.urg_data_present)
                        smc_cdc_handle_urg_data_arrival(smc, &diff_prod);
                /* bytes_to_rcv is decreased in smc_recvmsg */
                smp_mb__before_atomic();
                atomic_add(diff_prod, &conn->bytes_to_rcv);
                /* guarantee 0 <= bytes_to_rcv <= rmb_desc->len */
                smp_mb__after_atomic();
                smc->sk.sk_data_ready(&smc->sk);
        } else {
                if (conn->local_rx_ctrl.prod_flags.write_blocked)
                        smc->sk.sk_data_ready(&smc->sk);
                if (conn->local_rx_ctrl.prod_flags.urg_data_pending)
                        conn->urg_state = SMC_URG_NOTYET;
        }

        /* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
        if ((diff_cons && smc_tx_prepared_sends(conn)) ||
            conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
            conn->local_rx_ctrl.prod_flags.urg_data_pending)
                smc_tx_sndbuf_nonempty(conn);

        if (diff_cons && conn->urg_tx_pend &&
            atomic_read(&conn->peer_rmbe_space) == conn->peer_rmbe_size) {
                /* urg data confirmed by peer, indicate we're ready for more */
                conn->urg_tx_pend = false;
                smc->sk.sk_write_space(&smc->sk);
        }

        if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
                smc->sk.sk_err = ECONNRESET;
                conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
        }
        if (smc_cdc_rxed_any_close_or_senddone(conn)) {
                smc->sk.sk_shutdown |= RCV_SHUTDOWN;
                if (smc->clcsock && smc->clcsock->sk)
                        smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
                sock_set_flag(&smc->sk, SOCK_DONE);
                sock_hold(&smc->sk); /* sock_put in close_work */
                if (!queue_work(smc_close_wq, &conn->close_work))
                        sock_put(&smc->sk);
        }
}

/* called under tasklet context */
static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc)
{
        sock_hold(&smc->sk);
        bh_lock_sock(&smc->sk);
        smc_cdc_msg_recv_action(smc, cdc);
        bh_unlock_sock(&smc->sk);
        sock_put(&smc->sk); /* no free sk in softirq-context */
}

/* Tasklet handler for this connection. Scheduled from the ISM device IRQ
 * handler when an update in the DMBE is indicated.
 *
 * Context:
 * - tasklet context
 */
static void smcd_cdc_rx_tsklet(unsigned long data)
{
        struct smc_connection *conn = (struct smc_connection *)data;
        struct smcd_cdc_msg *data_cdc;
        struct smcd_cdc_msg cdc;
        struct smc_sock *smc;

        if (!conn || conn->killed)
                return;

        data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr;
        smcd_curs_copy(&cdc.prod, &data_cdc->prod, conn);
        smcd_curs_copy(&cdc.cons, &data_cdc->cons, conn);
        smc = container_of(conn, struct smc_sock, conn);
        smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc);
}

/* Initialize the receive tasklet of an SMC-D connection; the ISM device
 * IRQ handler schedules it to start the receiver side.
 */
void smcd_cdc_rx_init(struct smc_connection *conn)
{
        tasklet_init(&conn->rx_tsklet, smcd_cdc_rx_tsklet, (unsigned long)conn);
}

/***************************** init, exit, misc ******************************/

static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf)
{
        struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
        struct smc_cdc_msg *cdc = buf;
        struct smc_connection *conn;
        struct smc_link_group *lgr;
        struct smc_sock *smc;

        if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved))
                return; /* short message */
        if (cdc->len != SMC_WR_TX_SIZE)
                return; /* invalid message */

        /* lookup connection */
        lgr = smc_get_lgr(link);
        read_lock_bh(&lgr->conns_lock);
        conn = smc_lgr_find_conn(ntohl(cdc->token), lgr);
        read_unlock_bh(&lgr->conns_lock);
        if (!conn || conn->out_of_sync)
                return;
        smc = container_of(conn, struct smc_sock, conn);

        if (cdc->prod_flags.failover_validation) {
                smc_cdc_msg_validate(smc, cdc, link);
                return;
        }
        if (smc_cdc_before(ntohs(cdc->seqno),
                           conn->local_rx_ctrl.seqno))
                /* received seqno is old */
                return;

        smc_cdc_msg_recv(smc, cdc);
}

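/* CDC registers a single receive handler for SMC_CDC_MSG_TYPE with the
 * generic smc_wr receive dispatcher; the NULL .handler entry terminates
 * the array.
 */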
static struct smc_wr_rx_handler smc_cdc_rx_handlers[] = {
        {
                .handler = smc_cdc_rx_handler,
                .type = SMC_CDC_MSG_TYPE
        },
        {
                .handler = NULL,
        }
};

int __init smc_cdc_init(void)
{
        struct smc_wr_rx_handler *handler;
        int rc = 0;

        for (handler = smc_cdc_rx_handlers; handler->handler; handler++) {
                INIT_HLIST_NODE(&handler->list);
                rc = smc_wr_rx_register_handler(handler);
                if (rc)
                        break;
        }
        return rc;
}