Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

net/smc/smc_rx.c (all 444 lines blamed to commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300):

// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage RMBE
 * copy new RMBE data into user space
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>

#include <net/sock.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_cdc.h"
#include "smc_tx.h" /* smc_tx_consumer_update() */
#include "smc_rx.h"

/* callback implementation to wake up consumers blocked with smc_rx_wait().
 * indirectly called by smc_cdc_msg_recv_action().
 */
static void smc_rx_wake_up(struct sock *sk)
{
	struct socket_wq *wq;

	/* derived from sock_def_readable() */
	/* called already in smc_listen_work() */
	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
						EPOLLRDNORM | EPOLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
	    (sk->sk_state == SMC_CLOSED))
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	rcu_read_unlock();
}
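
/* Note (added for readability): smc_rx_init() below installs this function
 * as sk->sk_data_ready. Like sock_def_readable(), it wakes sleepers with
 * the full readable poll mask and escalates the async notification to
 * POLL_HUP once both directions are shut down (SHUTDOWN_MASK) or the
 * socket is closed.
 */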

/* Update consumer cursor
 *   @smc    smc socket (connection to update)
 *   @cons   consumer cursor
 *   @len    number of bytes consumed
 *   Returns:
 *   1 if we should end our receive, 0 otherwise
 */
static int smc_rx_update_consumer(struct smc_sock *smc,
				  union smc_host_cursor cons, size_t len)
{
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	bool force = false;
	int diff, rc = 0;

	smc_curs_add(conn->rmb_desc->len, &cons, len);

	/* did we process urgent data? */
	if (conn->urg_state == SMC_URG_VALID || conn->urg_rx_skip_pend) {
		diff = smc_curs_comp(conn->rmb_desc->len, &cons,
				     &conn->urg_curs);
		if (sock_flag(sk, SOCK_URGINLINE)) {
			if (diff == 0) {
				force = true;
				rc = 1;
				conn->urg_state = SMC_URG_READ;
			}
		} else {
			if (diff == 1) {
				/* skip urgent byte */
				force = true;
				smc_curs_add(conn->rmb_desc->len, &cons, 1);
				conn->urg_rx_skip_pend = false;
			} else if (diff < -1)
				/* we read past urgent byte */
				conn->urg_state = SMC_URG_READ;
		}
	}

	smc_curs_copy(&conn->local_tx_ctrl.cons, &cons, conn);

	/* send consumer cursor update if required */
	/* similar to advertising new TCP rcv_wnd if required */
	smc_tx_consumer_update(conn, force);

	return rc;
}
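
/* Worked example for the cursor helpers used above, assuming a
 * hypothetical 16-byte RMB (rmb_desc->len == 16): smc_curs_add() advances
 * count modulo the buffer size, so a cursor at count 14 advanced by 4
 * ends at count 2 with the wrap counter bumped; smc_curs_comp() then
 * returns the signed distance to conn->urg_curs, which is how diff == 0
 * (consumer exactly at the urgent cursor), diff == 1 (urgent byte is the
 * next unread byte) and diff < -1 (consumer read past the urgent byte)
 * are told apart.
 */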

static void smc_rx_update_cons(struct smc_sock *smc, size_t len)
{
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor cons;

	smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
	smc_rx_update_consumer(smc, cons, len);
}

struct smc_spd_priv {
	struct smc_sock *smc;
	size_t		 len;
};

static void smc_rx_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct smc_spd_priv *priv = (struct smc_spd_priv *)buf->private;
	struct smc_sock *smc = priv->smc;
	struct smc_connection *conn;
	struct sock *sk = &smc->sk;

	if (sk->sk_state == SMC_CLOSED ||
	    sk->sk_state == SMC_PEERFINCLOSEWAIT ||
	    sk->sk_state == SMC_APPFINCLOSEWAIT)
		goto out;
	conn = &smc->conn;
	lock_sock(sk);
	smc_rx_update_cons(smc, priv->len);
	release_sock(sk);
	if (atomic_sub_and_test(priv->len, &conn->splice_pending))
		smc_rx_wake_up(sk);
out:
	kfree(priv);
	put_page(buf->page);
	sock_put(sk);
}

static const struct pipe_buf_operations smc_pipe_ops = {
	.release = smc_rx_pipe_buf_release,
	.get = generic_pipe_buf_get
};

static void smc_rx_spd_release(struct splice_pipe_desc *spd,
			       unsigned int i)
{
	put_page(spd->pages[i]);
}

static int smc_rx_splice(struct pipe_inode_info *pipe, char *src, size_t len,
			 struct smc_sock *smc)
{
	struct splice_pipe_desc spd;
	struct partial_page partial;
	struct smc_spd_priv *priv;
	int bytes;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	priv->len = len;
	priv->smc = smc;
	partial.offset = src - (char *)smc->conn.rmb_desc->cpu_addr;
	partial.len = len;
	partial.private = (unsigned long)priv;

	spd.nr_pages_max = 1;
	spd.nr_pages = 1;
	spd.pages = &smc->conn.rmb_desc->pages;
	spd.partial = &partial;
	spd.ops = &smc_pipe_ops;
	spd.spd_release = smc_rx_spd_release;

	bytes = splice_to_pipe(pipe, &spd);
	if (bytes > 0) {
		sock_hold(&smc->sk);
		get_page(smc->conn.rmb_desc->pages);
		atomic_add(bytes, &smc->conn.splice_pending);
	}

	return bytes;
}
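
/* Lifetime pairing for the splice path: the sock_hold() and get_page()
 * references taken in smc_rx_splice() once bytes sit in the pipe are
 * dropped in smc_rx_pipe_buf_release() when the pipe consumer releases
 * the buffer; only then is the consumer cursor advanced and, once
 * splice_pending drains to zero, a blocked reader woken via
 * smc_rx_wake_up().
 */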

static int smc_rx_data_available_and_no_splice_pend(struct smc_connection *conn)
{
	return atomic_read(&conn->bytes_to_rcv) &&
	       !atomic_read(&conn->splice_pending);
}

/* blocks the rcvbuf consumer until the fcrit() condition holds, or until
 * timeout, interrupt, error or shutdown
 *   @smc    smc socket
 *   @timeo  pointer to max jiffies to wait; a value of 0 means do not block
 *   @fcrit  additional criterion to evaluate, as a function pointer
 * Returns:
 * 1 if at least 1 byte is available in the rcvbuf or on socket error/shutdown.
 * 0 otherwise (nothing arrived in the rcvbuf, e.g. timed out or interrupted).
 */
int smc_rx_wait(struct smc_sock *smc, long *timeo,
		int (*fcrit)(struct smc_connection *conn))
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct smc_cdc_conn_state_flags *cflags =
					&conn->local_tx_ctrl.conn_state_flags;
	struct sock *sk = &smc->sk;
	int rc;

	if (fcrit(conn))
		return 1;
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	add_wait_queue(sk_sleep(sk), &wait);
	rc = sk_wait_event(sk, timeo,
			   sk->sk_err ||
			   cflags->peer_conn_abort ||
			   sk->sk_shutdown & RCV_SHUTDOWN ||
			   conn->killed ||
			   fcrit(conn),
			   &wait);
	remove_wait_queue(sk_sleep(sk), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	return rc;
}
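
/* Typical call pattern, as used by smc_rx_recvmsg() below:
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	if (!smc_rx_data_available(conn))
 *		smc_rx_wait(smc, &timeo, smc_rx_data_available);
 *
 * sk_wait_event() writes the remaining time back through *timeo, so
 * repeated waits in a receive loop share a single timeout budget.
 */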

static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len,
			   int flags)
{
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor cons;
	struct sock *sk = &smc->sk;
	int rc = 0;

	if (sock_flag(sk, SOCK_URGINLINE) ||
	    !(conn->urg_state == SMC_URG_VALID) ||
	    conn->urg_state == SMC_URG_READ)
		return -EINVAL;

	if (conn->urg_state == SMC_URG_VALID) {
		if (!(flags & MSG_PEEK))
			smc->conn.urg_state = SMC_URG_READ;
		msg->msg_flags |= MSG_OOB;
		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				rc = memcpy_to_msg(msg, &conn->urg_rx_byte, 1);
			len = 1;
			smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
			if (smc_curs_diff(conn->rmb_desc->len, &cons,
					  &conn->urg_curs) > 1)
				conn->urg_rx_skip_pend = true;
			/* Urgent byte was already accounted for, but trigger
			 * skipping the urgent byte in the non-inline case
			 */
			if (!(flags & MSG_PEEK))
				smc_rx_update_consumer(smc, cons, 0);
		} else {
			msg->msg_flags |= MSG_TRUNC;
		}

		return rc ? -EFAULT : len;
	}

	if (sk->sk_state == SMC_CLOSED || sk->sk_shutdown & RCV_SHUTDOWN)
		return 0;

	return -EAGAIN;
}
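
/* Semantics of the MSG_OOB path above: SMC carries at most one byte of
 * urgent data, kept in conn->urg_rx_byte. recv(fd, buf, 1, MSG_OOB)
 * returns that byte while urg_state is SMC_URG_VALID; with SO_OOBINLINE
 * set, or with no unread urgent byte pending, the call fails with
 * -EINVAL, and a zero-length request only sets MSG_TRUNC.
 */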

static bool smc_rx_recvmsg_data_available(struct smc_sock *smc)
{
	struct smc_connection *conn = &smc->conn;

	if (smc_rx_data_available(conn))
		return true;
	else if (conn->urg_state == SMC_URG_VALID)
		/* we received a single urgent byte - skip */
		smc_rx_update_cons(smc, 0);
	return false;
}

/* smc_rx_recvmsg - receive data from RMBE
 * @msg:	copy data to receive buffer
 * @pipe:	copy data to pipe if set - indicates splice() call
 *
 * rcvbuf consumer: main API called by socket layer.
 * Called under sk lock.
 */
int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
		   struct pipe_inode_info *pipe, size_t len, int flags)
{
	size_t copylen, read_done = 0, read_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	int (*func)(struct smc_connection *conn);
	union smc_host_cursor cons;
	int readable, chunk;
	char *rcvbuf_base;
	struct sock *sk;
	int splbytes;
	long timeo;
	int target;		/* Read at least this many bytes */
	int rc;

	if (unlikely(flags & MSG_ERRQUEUE))
		return -EINVAL; /* future work for sk.sk_family == AF_SMC */

	sk = &smc->sk;
	if (sk->sk_state == SMC_LISTEN)
		return -ENOTCONN;
	if (flags & MSG_OOB)
		return smc_rx_recv_urg(smc, msg, len, flags);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	/* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */
	rcvbuf_base = conn->rx_off + conn->rmb_desc->cpu_addr;

	do { /* while (read_remaining) */
		if (read_done >= target || (pipe && read_done))
			break;

		if (conn->killed)
			break;

		if (smc_rx_recvmsg_data_available(smc))
			goto copy;

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			/* smc_cdc_msg_recv_action() could have run after
			 * the smc_rx_recvmsg_data_available() call above
			 */
			if (smc_rx_recvmsg_data_available(smc))
				goto copy;
			break;
		}

		if (read_done) {
			if (sk->sk_err ||
			    sk->sk_state == SMC_CLOSED ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sk->sk_err) {
				read_done = sock_error(sk);
				break;
			}
			if (sk->sk_state == SMC_CLOSED) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when the user tries to
					 * read from a socket that was never
					 * connected.
					 */
					read_done = -ENOTCONN;
					break;
				}
				break;
			}
			if (signal_pending(current)) {
				read_done = sock_intr_errno(timeo);
				break;
			}
			if (!timeo)
				return -EAGAIN;
		}

		if (!smc_rx_data_available(conn)) {
			smc_rx_wait(smc, &timeo, smc_rx_data_available);
			continue;
		}

copy:
		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after waiting on data above */
		readable = atomic_read(&conn->bytes_to_rcv);
		splbytes = atomic_read(&conn->splice_pending);
		if (!readable || (msg && splbytes)) {
			if (splbytes)
				func = smc_rx_data_available_and_no_splice_pend;
			else
				func = smc_rx_data_available;
			smc_rx_wait(smc, &timeo, func);
			continue;
		}

		smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
		/* subsequent splice() calls pick up where the previous one
		 * left off
		 */
		if (splbytes)
			smc_curs_add(conn->rmb_desc->len, &cons, splbytes);
		if (conn->urg_state == SMC_URG_VALID &&
		    sock_flag(&smc->sk, SOCK_URGINLINE) &&
		    readable > 1)
			readable--;	/* always stop at urgent byte */
		/* not more than what user space asked for */
		copylen = min_t(size_t, read_remaining, readable);
		/* determine chunks where to read from rcvbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t, copylen, conn->rmb_desc->len -
				  cons.count);
		chunk_len_sum = chunk_len;
		chunk_off = cons.count;
		smc_rmb_sync_sg_for_cpu(conn);
		for (chunk = 0; chunk < 2; chunk++) {
			if (!(flags & MSG_TRUNC)) {
				if (msg) {
					rc = memcpy_to_msg(msg, rcvbuf_base +
							   chunk_off,
							   chunk_len);
				} else {
					rc = smc_rx_splice(pipe, rcvbuf_base +
							chunk_off, chunk_len,
							smc);
				}
				if (rc < 0) {
					if (!read_done)
						read_done = -EFAULT;
					smc_rmb_sync_sg_for_device(conn);
					goto out;
				}
			}
			read_remaining -= chunk_len;
			read_done += chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in recv ring buffer */
		}
		smc_rmb_sync_sg_for_device(conn);

		/* update cursors */
		if (!(flags & MSG_PEEK)) {
			/* increased in recv tasklet smc_cdc_msg_rcv() */
			smp_mb__before_atomic();
			atomic_sub(copylen, &conn->bytes_to_rcv);
			/* guarantee 0 <= bytes_to_rcv <= rmb_desc->len */
			smp_mb__after_atomic();
			if (msg && smc_rx_update_consumer(smc, cons, copylen))
				goto out;
		}
	} while (read_remaining);
out:
	return read_done;
}
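
/* Worked example for the two-chunk copy loop above, with a hypothetical
 * 8 KiB RMB (rmb_desc->len == 8192): if cons.count == 7000 and
 * copylen == 2000, the first chunk copies 1192 bytes from offset 7000 up
 * to the end of the ring, and the second copies the remaining 808 bytes
 * from offset 0; in the unwrapped case chunk_len == copylen and the loop
 * exits after the first iteration.
 */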

/* Initialize receive properties on connection establishment. NB: not __init! */
void smc_rx_init(struct smc_sock *smc)
{
	smc->sk.sk_data_ready = smc_rx_wake_up;
	atomic_set(&smc->conn.splice_pending, 0);
	smc->conn.urg_state = SMC_URG_READ;
}
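
/* Userspace view (a minimal sketch for illustration, not part of the
 * kernel sources): the receive path above is exercised through an
 * ordinary AF_SMC stream socket, e.g.
 *
 *	#include <sys/socket.h>
 *	#define AF_SMC 43
 *
 *	int fd = socket(AF_SMC, SOCK_STREAM, 0);
 *	// ... connect(fd, ...), then:
 *	char buf[4096];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);	// -> smc_rx_recvmsg()
 *	ssize_t u = recv(fd, buf, 1, MSG_OOB);		// -> smc_rx_recv_urg()
 *
 * splice(fd, NULL, pipefd[1], NULL, len, 0) reaches smc_rx_splice() via
 * the same entry point, with @pipe set instead of @msg.
 */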