/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Connection Data Control (CDC)
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#ifndef SMC_CDC_H
#define SMC_CDC_H

#include <linux/kernel.h> /* max_t */
#include <linux/atomic.h>
#include <linux/in.h>
#include <linux/compiler.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_wr.h"

#define SMC_CDC_MSG_TYPE		0xFE

/* in network byte order */
union smc_cdc_cursor {		/* SMC cursor */
	struct {
		__be16	reserved;
		__be16	wrap;
		__be32	count;
	};
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t	acurs;	/* for atomic processing */
#else
	u64		acurs;	/* for atomic processing */
#endif
} __aligned(8);

/* in network byte order */
struct smc_cdc_msg {
	struct smc_wr_rx_hdr		common; /* .type = 0xFE */
	u8				len;	/* 44 */
	__be16				seqno;
	__be32				token;
	union smc_cdc_cursor		prod;
	union smc_cdc_cursor		cons;	/* piggybacked "ack" */
	struct smc_cdc_producer_flags	prod_flags;
	struct smc_cdc_conn_state_flags	conn_state_flags;
	u8				reserved[18];
};

/* SMC-D cursor format */
union smcd_cdc_cursor {
	struct {
		u16	wrap;
		u32	count;
		struct smc_cdc_producer_flags	prod_flags;
		struct smc_cdc_conn_state_flags	conn_state_flags;
	} __packed;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t	acurs;	/* for atomic processing */
#else
	u64		acurs;	/* for atomic processing */
#endif
} __aligned(8);

/* CDC message for SMC-D */
struct smcd_cdc_msg {
	struct smc_wr_rx_hdr	common;	/* Type = 0xFE */
	u8			res1[7];
	union smcd_cdc_cursor	prod;
	union smcd_cdc_cursor	cons;
	u8			res3[8];
} __aligned(8);

static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn)
{
	return conn->local_rx_ctrl.conn_state_flags.peer_conn_abort ||
	       conn->local_rx_ctrl.conn_state_flags.peer_conn_closed;
}

static inline bool smc_cdc_rxed_any_close_or_senddone(
	struct smc_connection *conn)
{
	return smc_cdc_rxed_any_close(conn) ||
	       conn->local_rx_ctrl.conn_state_flags.peer_done_writing;
}

static inline void smc_curs_add(int size, union smc_host_cursor *curs,
				int value)
{
	curs->count += value;
	if (curs->count >= size) {
		curs->wrap++;
		curs->count -= size;
	}
}
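
/* Worked example for smc_curs_add() (illustrative values only): with
 * size = 8, a cursor at {wrap = 2, count = 6} advanced by value = 5
 * ends up at {wrap = 3, count = 3}, because 6 + 5 = 11 crosses the end
 * of the 8-byte ring and wraps around.
 */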

/* Copy cursor src into tgt */
static inline void smc_curs_copy(union smc_host_cursor *tgt,
				 union smc_host_cursor *src,
				 struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
	unsigned long flags;

	spin_lock_irqsave(&conn->acurs_lock, flags);
	tgt->acurs = src->acurs;
	spin_unlock_irqrestore(&conn->acurs_lock, flags);
#else
	atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
#endif
}

static inline void smc_curs_copy_net(union smc_cdc_cursor *tgt,
				     union smc_cdc_cursor *src,
				     struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
	unsigned long flags;

	spin_lock_irqsave(&conn->acurs_lock, flags);
	tgt->acurs = src->acurs;
	spin_unlock_irqrestore(&conn->acurs_lock, flags);
#else
	atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
#endif
}

static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt,
				  union smcd_cdc_cursor *src,
				  struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
	unsigned long flags;

	spin_lock_irqsave(&conn->acurs_lock, flags);
	tgt->acurs = src->acurs;
	spin_unlock_irqrestore(&conn->acurs_lock, flags);
#else
	atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
#endif
}

/* calculate cursor difference between old and new, where old <= new and
 * difference cannot exceed size
 */
static inline int smc_curs_diff(unsigned int size,
				union smc_host_cursor *old,
				union smc_host_cursor *new)
{
	if (old->wrap != new->wrap)
		return max_t(int, 0,
			     ((size - old->count) + new->count));

	return max_t(int, 0, (new->count - old->count));
}
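
/* Worked example for smc_curs_diff() (illustrative values only): with
 * size = 8, old = {wrap = 2, count = 6} and new = {wrap = 3, count = 1},
 * the wrap counters differ, so the difference is
 * (size - old->count) + new->count = (8 - 6) + 1 = 3 bytes.
 */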

/* calculate cursor difference between old and new - returns negative
 * value in case old > new
 */
static inline int smc_curs_comp(unsigned int size,
				union smc_host_cursor *old,
				union smc_host_cursor *new)
{
	if (old->wrap > new->wrap ||
	    (old->wrap == new->wrap && old->count > new->count))
		return -smc_curs_diff(size, new, old);
	return smc_curs_diff(size, old, new);
}

/* calculate cursor difference between old and new, where old <= new and
 * difference may exceed size
 */
static inline int smc_curs_diff_large(unsigned int size,
				      union smc_host_cursor *old,
				      union smc_host_cursor *new)
{
	if (old->wrap < new->wrap)
		return min_t(int,
			     (size - old->count) + new->count +
			     (new->wrap - old->wrap - 1) * size,
			     size);

	if (old->wrap > new->wrap) /* wrap has switched from 0xffff to 0x0000 */
		return min_t(int,
			     (size - old->count) + new->count +
			     (new->wrap + 0xffff - old->wrap) * size,
			     size);

	return max_t(int, 0, (new->count - old->count));
}
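
/* Worked example for smc_curs_diff_large() (illustrative values only):
 * with size = 8, old = {wrap = 1, count = 6} and new = {wrap = 3, count = 2},
 * the raw difference (8 - 6) + 2 + (3 - 1 - 1) * 8 = 12 exceeds the ring
 * size and is therefore clamped to 8.
 */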

static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer,
					  union smc_host_cursor *local,
					  union smc_host_cursor *save,
					  struct smc_connection *conn)
{
	smc_curs_copy(save, local, conn);
	peer->count = htonl(save->count);
	peer->wrap = htons(save->wrap);
	/* peer->reserved = htons(0); must be ensured by caller */
}

static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer,
				       struct smc_connection *conn,
				       union smc_host_cursor *save)
{
	struct smc_host_cdc_msg *local = &conn->local_tx_ctrl;

	peer->common.type = local->common.type;
	peer->len = local->len;
	peer->seqno = htons(local->seqno);
	peer->token = htonl(local->token);
	smc_host_cursor_to_cdc(&peer->prod, &local->prod, save, conn);
	smc_host_cursor_to_cdc(&peer->cons, &local->cons, save, conn);
	peer->prod_flags = local->prod_flags;
	peer->conn_state_flags = local->conn_state_flags;
}

static inline void smc_cdc_cursor_to_host(union smc_host_cursor *local,
					  union smc_cdc_cursor *peer,
					  struct smc_connection *conn)
{
	union smc_host_cursor temp, old;
	union smc_cdc_cursor net;

	smc_curs_copy(&old, local, conn);
	smc_curs_copy_net(&net, peer, conn);
	temp.count = ntohl(net.count);
	temp.wrap = ntohs(net.wrap);
	if ((old.wrap > temp.wrap) && temp.wrap)
		return;
	if ((old.wrap == temp.wrap) &&
	    (old.count > temp.count))
		return;
	smc_curs_copy(local, &temp, conn);
}

static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local,
					struct smc_cdc_msg *peer,
					struct smc_connection *conn)
{
	local->common.type = peer->common.type;
	local->len = peer->len;
	local->seqno = ntohs(peer->seqno);
	local->token = ntohl(peer->token);
	smc_cdc_cursor_to_host(&local->prod, &peer->prod, conn);
	smc_cdc_cursor_to_host(&local->cons, &peer->cons, conn);
	local->prod_flags = peer->prod_flags;
	local->conn_state_flags = peer->conn_state_flags;
}

static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local,
					struct smcd_cdc_msg *peer,
					struct smc_connection *conn)
{
	union smc_host_cursor temp;

	temp.wrap = peer->prod.wrap;
	temp.count = peer->prod.count;
	smc_curs_copy(&local->prod, &temp, conn);

	temp.wrap = peer->cons.wrap;
	temp.count = peer->cons.count;
	smc_curs_copy(&local->cons, &temp, conn);
	local->prod_flags = peer->cons.prod_flags;
	local->conn_state_flags = peer->cons.conn_state_flags;
}

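/* Convert a received CDC message to host format, dispatching on the link
 * group type: SMC-D messages use host byte order in struct smcd_cdc_msg,
 * while SMC-R messages arrive in network byte order and need
 * ntohs()/ntohl() conversion.
 */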
static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
				       struct smc_cdc_msg *peer,
				       struct smc_connection *conn)
{
	if (conn->lgr->is_smcd)
		smcd_cdc_msg_to_host(local, (struct smcd_cdc_msg *)peer, conn);
	else
		smcr_cdc_msg_to_host(local, peer, conn);
}

struct smc_cdc_tx_pend {
	struct smc_connection	*conn;		/* socket connection */
	union smc_host_cursor	cursor;		/* tx sndbuf cursor sent */
	union smc_host_cursor	p_cursor;	/* rx RMBE cursor produced */
	u16			ctrl_seq;	/* conn. tx sequence # */
};

int smc_cdc_get_free_slot(struct smc_connection *conn,
			  struct smc_link *link,
			  struct smc_wr_buf **wr_buf,
			  struct smc_rdma_wr **wr_rdma_buf,
			  struct smc_cdc_tx_pend **pend);
void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn);
int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
		     struct smc_cdc_tx_pend *pend);
int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
int smcd_cdc_msg_send(struct smc_connection *conn);
int smcr_cdc_msg_send_validation(struct smc_connection *conn,
				 struct smc_cdc_tx_pend *pend,
				 struct smc_wr_buf *wr_buf);
int smc_cdc_init(void) __init;
void smcd_cdc_rx_init(struct smc_connection *conn);

#endif /* SMC_CDC_H */