Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage send buffer.
 * Producer:
 * Copy user space data into send buffer, if send buffer space available.
 * Consumer:
 * Trigger RDMA write into RMBE of peer and send CDC, if RMBE space available.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/tcp.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"
#include "smc_tx.h"

#define SMC_TX_WORK_DELAY	0
#define SMC_TX_CORK_DELAY	(HZ >> 2)	/* 250 ms */

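/* Note: both delays are in jiffies. HZ >> 2 equals HZ / 4, i.e. 250 ms
 * independent of the configured timer frequency:
 *	HZ = 100  -> SMC_TX_CORK_DELAY =  25 jiffies = 250 ms
 *	HZ = 250  -> SMC_TX_CORK_DELAY =  62 jiffies ~ 248 ms
 *	HZ = 1000 -> SMC_TX_CORK_DELAY = 250 jiffies = 250 ms
 * SMC_TX_WORK_DELAY of 0 makes queue_delayed_work() behave like an
 * immediate queue_work().
 */
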
/***************************** sndbuf producer *******************************/

/* callback implementation for sk.sk_write_space()
 * to wake up sndbuf producers that blocked with smc_tx_wait().
 * called under sk_socket lock.
 */
static void smc_tx_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct smc_sock *smc = smc_sk(sk);
	struct socket_wq *wq;

	/* similar to sk_stream_write_space */
	if (atomic_read(&smc->conn.sndbuf_space) && sock) {
		clear_bit(SOCK_NOSPACE, &sock->flags);
		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait,
						   EPOLLOUT | EPOLLWRNORM |
						   EPOLLWRBAND);
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}

/* Wake up sndbuf producers that blocked with smc_tx_wait().
 * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space().
 */
void smc_tx_sndbuf_nonfull(struct smc_sock *smc)
{
	if (smc->sk.sk_socket &&
	    test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
		smc->sk.sk_write_space(&smc->sk);
}
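
/* Note: smc_tx_sndbuf_nonfull() is the counterpart to smc_tx_wait(); it is
 * expected to run from the CDC receive path once the peer's consumer cursor
 * advanced and sndbuf space was regained (assumption from the upstream SMC
 * call graph, not visible in this file). It re-triggers sk_write_space()
 * only if a producer set SOCK_NOSPACE, avoiding spurious wakeups.
 */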

/* blocks sndbuf producer until at least one byte of free space is available
 * or the urgent byte was consumed
 */
static int smc_tx_wait(struct smc_sock *smc, int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	long timeo;
	int rc = 0;

	/* similar to sk_stream_wait_memory */
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (sk->sk_err ||
		    (sk->sk_shutdown & SEND_SHUTDOWN) ||
		    conn->killed ||
		    conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
			rc = -EPIPE;
			break;
		}
		if (smc_cdc_rxed_any_close(conn)) {
			rc = -ECONNRESET;
			break;
		}
		if (!timeo) {
			/* ensure EPOLLOUT is subsequently generated */
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			rc = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (atomic_read(&conn->sndbuf_space) && !conn->urg_tx_pend)
			break; /* at least 1 byte of free & no urgent data */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk_wait_event(sk, &timeo,
			      sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      smc_cdc_rxed_any_close(conn) ||
			      (atomic_read(&conn->sndbuf_space) &&
			       !conn->urg_tx_pend),
			      &wait);
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}
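
/* Return values of smc_tx_wait(), as implemented above:
 *	 0		sndbuf space available and no urgent data pending
 *	-EPIPE		local error/shutdown, killed connection, or peer
 *			done writing
 *	-ECONNRESET	peer already sent a close
 *	-EAGAIN		timeout elapsed or MSG_DONTWAIT; SOCK_NOSPACE is set
 *			so a subsequent EPOLLOUT gets generated
 *	-EINTR / -ERESTARTSYS	signal pending (via sock_intr_errno())
 */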

static bool smc_tx_is_corked(struct smc_sock *smc)
{
	struct tcp_sock *tp = tcp_sk(smc->clcsock->sk);

	return (tp->nonagle & TCP_NAGLE_CORK) ? true : false;
}
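
/* Note: smc_tx_is_corked() inspects the TCP_CORK state of the internal
 * CLC TCP socket. The working assumption here (based on SMC forwarding
 * TCP-level socket options to clcsock, which happens outside this file)
 * is that a TCP_CORK setsockopt issued by the application also corks
 * the SMC data path.
 */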

/* sndbuf producer: main API called by socket layer.
 * called under sock lock.
 */
int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
{
	size_t copylen, send_done = 0, send_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor prep;
	struct sock *sk = &smc->sk;
	char *sndbuf_base;
	int tx_cnt_prep;
	int writespace;
	int rc, chunk;

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		rc = -EPIPE;
		goto out_err;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_state == SMC_INIT)
			return -ENOTCONN;
		if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
		    (smc->sk.sk_err == ECONNABORTED) ||
		    conn->killed)
			return -EPIPE;
		if (smc_cdc_rxed_any_close(conn))
			return send_done ?: -ECONNRESET;

		if (msg->msg_flags & MSG_OOB)
			conn->local_tx_ctrl.prod_flags.urg_data_pending = 1;

		if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) {
			if (send_done)
				return send_done;
			rc = smc_tx_wait(smc, msg->msg_flags);
			if (rc)
				goto out_err;
			continue;
		}

		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after smc_tx_wait above */
		writespace = atomic_read(&conn->sndbuf_space);
		/* not more than what user space asked for */
		copylen = min_t(size_t, send_remaining, writespace);
		/* determine start of sndbuf */
		sndbuf_base = conn->sndbuf_desc->cpu_addr;
		smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
		tx_cnt_prep = prep.count;
		/* determine chunks where to write into sndbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t, copylen, conn->sndbuf_desc->len -
				  tx_cnt_prep);
		chunk_len_sum = chunk_len;
		chunk_off = tx_cnt_prep;
		smc_sndbuf_sync_sg_for_cpu(conn);
		for (chunk = 0; chunk < 2; chunk++) {
			rc = memcpy_from_msg(sndbuf_base + chunk_off,
					     msg, chunk_len);
			if (rc) {
				smc_sndbuf_sync_sg_for_device(conn);
				if (send_done)
					return send_done;
				goto out_err;
			}
			send_done += chunk_len;
			send_remaining -= chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in send ring buffer */
		}
		smc_sndbuf_sync_sg_for_device(conn);
		/* update cursors */
		smc_curs_add(conn->sndbuf_desc->len, &prep, copylen);
		smc_curs_copy(&conn->tx_curs_prep, &prep, conn);
		/* increased in send tasklet smc_cdc_tx_handler() */
		smp_mb__before_atomic();
		atomic_sub(copylen, &conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
		smp_mb__after_atomic();
		/* since we just produced more new data into sndbuf,
		 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
		 */
		if ((msg->msg_flags & MSG_OOB) && !send_remaining)
			conn->urg_tx_pend = true;
		if ((msg->msg_flags & MSG_MORE || smc_tx_is_corked(smc)) &&
		    (atomic_read(&conn->sndbuf_space) >
						(conn->sndbuf_desc->len >> 1)))
			/* for a corked socket defer the RDMA writes if there
			 * is still sufficient sndbuf_space available
			 */
			queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work,
					   SMC_TX_CORK_DELAY);
		else
			smc_tx_sndbuf_nonempty(conn);
	} /* while (msg_data_left(msg)) */

	return send_done;

out_err:
	rc = sk_stream_error(sk, msg->msg_flags, rc);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(rc == -EAGAIN))
		sk->sk_write_space(sk);
	return rc;
}
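
/* Worked example for the two-pass copy loop in smc_tx_sendmsg() above
 * (numbers purely illustrative): with sndbuf_desc->len == 16384 and the
 * prep cursor at tx_cnt_prep == 15360, a copylen of 2048 is split into
 *	chunk 0: 1024 bytes at offset 15360 (up to the ring's end)
 *	chunk 1: 1024 bytes at offset 0 (wrapped remainder)
 * Afterwards smc_curs_add() advances the prep cursor modulo the buffer
 * length and sndbuf_space shrinks by copylen.
 */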

/***************************** sndbuf consumer *******************************/

/* sndbuf consumer: actual data transfer of one target chunk with ISM write */
int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
		      u32 offset, int signal)
{
	struct smc_ism_position pos;
	int rc;

	memset(&pos, 0, sizeof(pos));
	pos.token = conn->peer_token;
	pos.index = conn->peer_rmbe_idx;
	pos.offset = conn->tx_off + offset;
	pos.signal = signal;
	rc = smc_ism_write(conn->lgr->smcd, &pos, data, len);
	if (rc)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	return rc;
}

/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
			     int num_sges, struct ib_rdma_wr *rdma_wr)
{
	struct smc_link_group *lgr = conn->lgr;
	struct smc_link *link = conn->lnk;
	int rc;

	rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link);
	rdma_wr->wr.num_sge = num_sges;
	rdma_wr->remote_addr =
		lgr->rtokens[conn->rtoken_idx][link->link_idx].dma_addr +
		/* RMBE within RMB */
		conn->tx_off +
		/* offset within RMBE */
		peer_rmbe_offset;
	rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][link->link_idx].rkey;
	rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL);
	if (rc)
		smcr_link_down_cond_sched(link);
	return rc;
}

/* sndbuf consumer */
static inline void smc_tx_advance_cursors(struct smc_connection *conn,
					  union smc_host_cursor *prod,
					  union smc_host_cursor *sent,
					  size_t len)
{
	smc_curs_add(conn->peer_rmbe_size, prod, len);
	/* increased in recv tasklet smc_cdc_msg_rcv() */
	smp_mb__before_atomic();
	/* data in flight reduces usable snd_wnd */
	atomic_sub(len, &conn->peer_rmbe_space);
	/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
	smp_mb__after_atomic();
	smc_curs_add(conn->sndbuf_desc->len, sent, len);
}
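
/* Note on cursor arithmetic: smc_curs_add(size, curs, len) advances
 * curs->count modulo size and increments curs->wrap on overflow; e.g.
 * with size == 8192, count == 8000 and len == 500 the result is
 * count == 308 with wrap bumped by one (semantics taken from the
 * upstream smc_cdc helpers, defined outside this file). peer_rmbe_space
 * acts as the usable send window: it shrinks here for data in flight
 * and grows again in the receive tasklet once the peer confirms
 * consumption.
 */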

/* SMC-R helper for smc_tx_rdma_writes() */
static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len,
			       struct smc_rdma_wr *wr_rdma_buf)
{
	struct smc_link *link = conn->lnk;
	dma_addr_t dma_addr =
		sg_dma_address(conn->sndbuf_desc->sgt[link->link_idx].sgl);
	int src_len_sum = src_len, dst_len_sum = dst_len;
	int sent_count = src_off;
	int srcchunk, dstchunk;
	int num_sges;
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		struct ib_sge *sge =
			wr_rdma_buf->wr_tx_rdma[dstchunk].wr.sg_list;

		num_sges = 0;
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			sge[srcchunk].addr = dma_addr + src_off;
			sge[srcchunk].length = src_len;
			num_sges++;

			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
						/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		rc = smc_tx_rdma_write(conn, dst_off, num_sges,
				       &wr_rdma_buf->wr_tx_rdma[dstchunk]);
		if (rc)
			return rc;
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len -
				sent_count);
		src_len_sum = src_len;
	}
	return 0;
}
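
/* The nested loops in smcr_tx_rdma_writes() above cover all wrap
 * combinations with at most two RDMA writes of at most two sges each:
 *	- src and dst unwrapped:  1 write, 1 sge
 *	- only src wrapped:       1 write, 2 sges
 *	- only dst wrapped:       2 writes, 1 sge each
 *	- both wrapped:           2 writes; the write spanning the src
 *				  wrap point carries 2 sges, the other 1
 */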

/* SMC-D helper for smc_tx_rdma_writes() */
static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len)
{
	int src_len_sum = src_len, dst_len_sum = dst_len;
	int srcchunk, dstchunk;
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			void *data = conn->sndbuf_desc->cpu_addr + src_off;

			rc = smcd_tx_ism_write(conn, data, src_len, dst_off +
					       sizeof(struct smcd_cdc_msg), 0);
			if (rc)
				return rc;
			dst_off += src_len;
			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
						/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len - src_off);
		src_len_sum = src_len;
	}
	return 0;
}
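
/* Note: every SMC-D destination offset above is shifted by
 * sizeof(struct smcd_cdc_msg) because the start of the peer's RMBE is
 * reserved for the CDC message (per the SMC-D layout; the reservation
 * itself is defined outside this file). Unlike SMC-R there is no
 * scatter/gather list: each chunk is moved synchronously through
 * smcd_tx_ism_write().
 */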

/* sndbuf consumer: prepare all necessary (src & dst) chunks of the data
 * transmit; usable snd_wnd as max transmit
 */
static int smc_tx_rdma_writes(struct smc_connection *conn,
			      struct smc_rdma_wr *wr_rdma_buf)
{
	size_t len, src_len, dst_off, dst_len; /* current chunk values */
	union smc_host_cursor sent, prep, prod, cons;
	struct smc_cdc_producer_flags *pflags;
	int to_send, rmbespace;
	int rc;

	/* source: sndbuf */
	smc_curs_copy(&sent, &conn->tx_curs_sent, conn);
	smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
	/* cf. wmem_alloc - (snd_max - snd_una) */
	to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
	if (to_send <= 0)
		return 0;

	/* destination: RMBE */
	/* cf. snd_wnd */
	rmbespace = atomic_read(&conn->peer_rmbe_space);
	if (rmbespace <= 0)
		return 0;
	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);

	/* if usable snd_wnd closes ask peer to advertise once it opens again */
	pflags = &conn->local_tx_ctrl.prod_flags;
	pflags->write_blocked = (to_send >= rmbespace);
	/* cf. usable snd_wnd */
	len = min(to_send, rmbespace);

	/* initialize variables for first iteration of subsequent nested loop */
	dst_off = prod.count;
	if (prod.wrap == cons.wrap) {
		/* the filled destination area is unwrapped,
		 * hence the available free destination space is wrapped
		 * and we need 2 destination chunks of sum len; start with 1st
		 * which is limited by what's available in sndbuf
		 */
		dst_len = min_t(size_t,
				conn->peer_rmbe_size - prod.count, len);
	} else {
		/* the filled destination area is wrapped,
		 * hence the available free destination space is unwrapped
		 * and we need a single destination chunk of entire len
		 */
		dst_len = len;
	}
	/* dst_len determines the maximum src_len */
	if (sent.count + dst_len <= conn->sndbuf_desc->len) {
		/* unwrapped src case: single chunk of entire dst_len */
		src_len = dst_len;
	} else {
		/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
		src_len = conn->sndbuf_desc->len - sent.count;
	}

	if (conn->lgr->is_smcd)
		rc = smcd_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len);
	else
		rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len, wr_rdma_buf);
	if (rc)
		return rc;

	if (conn->urg_tx_pend && len == to_send)
		pflags->urg_data_present = 1;
	smc_tx_advance_cursors(conn, &prod, &sent, len);
	/* update connection's cursors with advanced local cursors */
	smc_curs_copy(&conn->local_tx_ctrl.prod, &prod, conn);
							/* dst: peer RMBE */
	smc_curs_copy(&conn->tx_curs_sent, &sent, conn);/* src: local sndbuf */

	return 0;
}
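
/* Sketch of the wrap decision in smc_tx_rdma_writes() above: prod and
 * cons carry a wrap sequence counter besides the byte count. If
 * prod.wrap == cons.wrap, the filled destination area does not cross
 * the buffer end, so the *free* area wraps and two destination chunks
 * are needed; otherwise the free area is contiguous and a single chunk
 * of the full length suffices.
 */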

/* Wake up sndbuf consumers from any context (IRQ or process)
 * since there is more data to transmit; usable snd_wnd as max transmit
 */
static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
	struct smc_link *link = conn->lnk;
	struct smc_rdma_wr *wr_rdma_buf;
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!link || !smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_cdc_get_free_slot(conn, link, &wr_buf, &wr_rdma_buf, &pend);
	if (rc < 0) {
		smc_wr_tx_link_put(link);
		if (rc == -EBUSY) {
			struct smc_sock *smc =
				container_of(conn, struct smc_sock, conn);

			if (smc->sk.sk_err == ECONNABORTED)
				return sock_error(&smc->sk);
			if (conn->killed)
				return -EPIPE;
			rc = 0;
			mod_delayed_work(conn->lgr->tx_wq, &conn->tx_work,
					 SMC_TX_WORK_DELAY);
		}
		return rc;
	}

	spin_lock_bh(&conn->send_lock);
	if (link != conn->lnk) {
		/* link of connection changed, tx_work will restart */
		smc_wr_tx_put_slot(link,
				   (struct smc_wr_tx_pend_priv *)pend);
		rc = -ENOLINK;
		goto out_unlock;
	}
	if (!pflags->urg_data_present) {
		rc = smc_tx_rdma_writes(conn, wr_rdma_buf);
		if (rc) {
			smc_wr_tx_put_slot(link,
					   (struct smc_wr_tx_pend_priv *)pend);
			goto out_unlock;
		}
	}

	rc = smc_cdc_msg_send(conn, wr_buf, pend);
	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}

out_unlock:
	spin_unlock_bh(&conn->send_lock);
	smc_wr_tx_link_put(link);
	return rc;
}
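
/* Note: -EBUSY from smc_cdc_get_free_slot() indicates that all work
 * request slots on the link are currently in use (per the smc_wr slot
 * pool semantics, defined outside this file); smcr_tx_sndbuf_nonempty()
 * then reports success and retries asynchronously via tx_work with
 * SMC_TX_WORK_DELAY == 0. The link != conn->lnk re-check under
 * send_lock guards against a link switch between slot reservation and
 * posting the writes.
 */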

static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
	int rc = 0;

	spin_lock_bh(&conn->send_lock);
	if (!pflags->urg_data_present)
		rc = smc_tx_rdma_writes(conn, NULL);
	if (!rc)
		rc = smcd_cdc_msg_send(conn);

	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}
	spin_unlock_bh(&conn->send_lock);
	return rc;
}

int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	int rc;

	if (conn->killed ||
	    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
		return -EPIPE;	/* connection being aborted */
	if (conn->lgr->is_smcd)
		rc = smcd_tx_sndbuf_nonempty(conn);
	else
		rc = smcr_tx_sndbuf_nonempty(conn);

	if (!rc) {
		/* trigger socket release if connection is closing */
		struct smc_sock *smc = container_of(conn, struct smc_sock,
						    conn);
		smc_close_wake_tx_prepared(smc);
	}
	return rc;
}

/* Wake up sndbuf consumers from process context
 * since there is more data to transmit
 */
void smc_tx_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(to_delayed_work(work),
						   struct smc_connection,
						   tx_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc;

	lock_sock(&smc->sk);
	if (smc->sk.sk_err)
		goto out;

	rc = smc_tx_sndbuf_nonempty(conn);
	if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;

out:
	release_sock(&smc->sk);
}

void smc_tx_consumer_update(struct smc_connection *conn, bool force)
{
	union smc_host_cursor cfed, cons, prod;
	int sender_free = conn->rmb_desc->len;
	int to_confirm;

	smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
	smc_curs_copy(&cfed, &conn->rx_curs_confirmed, conn);
	to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
	if (to_confirm > conn->rmbe_update_limit) {
		smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn);
		sender_free = conn->rmb_desc->len -
			      smc_curs_diff_large(conn->rmb_desc->len,
						  &cfed, &prod);
	}

	if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    force ||
	    ((to_confirm > conn->rmbe_update_limit) &&
	     ((sender_free <= (conn->rmb_desc->len / 2)) ||
	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
		if (conn->killed ||
		    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
			return;
		if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
		    !conn->killed) {
			queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work,
					   SMC_TX_WORK_DELAY);
			return;
		}
	}
	if (conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}
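
/* Note: smc_tx_consumer_update() throttles consumer cursor updates: a
 * CDC message is sent only if the peer requested one, the caller forces
 * it, or more than rmbe_update_limit bytes were consumed while the peer
 * is write-blocked or sees at most half of the RMB as free. A failed
 * send is retried from tx_work unless the connection was killed.
 */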

/***************************** send initialize *******************************/

/* Initialize send properties on connection establishment. NB: not __init! */
void smc_tx_init(struct smc_sock *smc)
{
	smc->sk.sk_write_space = smc_tx_write_space;
}