^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Shared Memory Communications over RDMA (SMC-R) and RoCE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Socket Closing - normal and abnormal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright IBM Corp. 2016
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/sched/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <net/sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <net/tcp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include "smc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include "smc_tx.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include "smc_cdc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include "smc_close.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) /* release the clcsock that is assigned to the smc_sock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) void smc_clcsock_release(struct smc_sock *smc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) struct socket *tcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) if (smc->listen_smc && current_work() != &smc->smc_listen_work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) cancel_work_sync(&smc->smc_listen_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) mutex_lock(&smc->clcsock_release_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) if (smc->clcsock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) tcp = smc->clcsock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) smc->clcsock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) sock_release(tcp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) mutex_unlock(&smc->clcsock_release_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) static void smc_close_cleanup_listen(struct sock *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) /* Close non-accepted connections */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) while ((sk = smc_accept_dequeue(parent, NULL)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) smc_close_non_accepted(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) /* wait for sndbuf data being transmitted */
/* wait for sndbuf data being transmitted */
static void smc_close_stream_wait(struct smc_sock *smc, long timeout)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct sock *sk = &smc->sk;

	/* a zero timeout means: do not wait at all */
	if (!timeout)
		return;

	/* nothing prepared in the sndbuf that still has to go out */
	if (!smc_tx_prepared_sends(&smc->conn))
		return;

	/* flag tells smc_close_wake_tx_prepared() that we are waiting here */
	smc->wait_close_tx_prepared = 1;
	add_wait_queue(sk_sleep(sk), &wait);
	while (!signal_pending(current) && timeout) {
		int rc;

		/* wake up once all prepared sends are out, the socket hit a
		 * fatal error, or the connection was killed
		 */
		rc = sk_wait_event(sk, &timeout,
				   !smc_tx_prepared_sends(&smc->conn) ||
				   sk->sk_err == ECONNABORTED ||
				   sk->sk_err == ECONNRESET ||
				   smc->conn.killed,
				   &wait);
		if (rc)
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	smc->wait_close_tx_prepared = 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) void smc_close_wake_tx_prepared(struct smc_sock *smc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) if (smc->wait_close_tx_prepared)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) /* wake up socket closing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) smc->sk.sk_state_change(&smc->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) static int smc_close_wr(struct smc_connection *conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) conn->local_tx_ctrl.conn_state_flags.peer_done_writing = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) return smc_cdc_get_slot_and_msg_send(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) static int smc_close_final(struct smc_connection *conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) if (atomic_read(&conn->bytes_to_rcv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) conn->local_tx_ctrl.conn_state_flags.peer_conn_closed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) if (conn->killed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) return -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) return smc_cdc_get_slot_and_msg_send(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) int smc_close_abort(struct smc_connection *conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) return smc_cdc_get_slot_and_msg_send(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
/* cancel the pending close and tx works of a connection; called with the
 * sock lock held, which is dropped while waiting for the works to finish
 */
static void smc_close_cancel_work(struct smc_sock *smc)
{
	struct sock *sk = &smc->sk;

	/* drop the lock so the works (e.g. smc_close_passive_work, which
	 * takes the sock lock itself) can run to completion
	 */
	release_sock(sk);
	cancel_work_sync(&smc->conn.close_work);
	cancel_delayed_work_sync(&smc->conn.tx_work);
	lock_sock(sk);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) /* terminate smc socket abnormally - active abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) * link group is terminated, i.e. RDMA communication no longer possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) */
/* terminate smc socket abnormally - active abort
 * link group is terminated, i.e. RDMA communication no longer possible
 */
void smc_close_active_abort(struct smc_sock *smc)
{
	struct sock *sk = &smc->sk;
	bool release_clcsock = false;

	/* except in the fresh SMC_INIT state, flag the error on the smc
	 * socket and reset the underlying TCP connection
	 */
	if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) {
		sk->sk_err = ECONNABORTED;
		/* NOTE(review): this inner check repeats the outer condition;
		 * looks redundant unless clcsock can be cleared in between -
		 * confirm before simplifying
		 */
		if (smc->clcsock && smc->clcsock->sk)
			tcp_abort(smc->clcsock->sk, ECONNABORTED);
	}
	switch (sk->sk_state) {
	case SMC_ACTIVE:
	case SMC_APPCLOSEWAIT1:
	case SMC_APPCLOSEWAIT2:
		sk->sk_state = SMC_PEERABORTWAIT;
		/* drops and reacquires the sock lock; the state may have
		 * changed meanwhile, hence the recheck below
		 */
		smc_close_cancel_work(smc);
		if (sk->sk_state != SMC_PEERABORTWAIT)
			break;
		sk->sk_state = SMC_CLOSED;
		sock_put(sk); /* (postponed) passive closing */
		break;
	case SMC_PEERCLOSEWAIT1:
	case SMC_PEERCLOSEWAIT2:
	case SMC_PEERFINCLOSEWAIT:
		sk->sk_state = SMC_PEERABORTWAIT;
		smc_close_cancel_work(smc);
		if (sk->sk_state != SMC_PEERABORTWAIT)
			break;
		sk->sk_state = SMC_CLOSED;
		smc_conn_free(&smc->conn);
		release_clcsock = true;
		sock_put(sk); /* passive closing */
		break;
	case SMC_PROCESSABORT:
	case SMC_APPFINCLOSEWAIT:
		sk->sk_state = SMC_PEERABORTWAIT;
		smc_close_cancel_work(smc);
		if (sk->sk_state != SMC_PEERABORTWAIT)
			break;
		sk->sk_state = SMC_CLOSED;
		smc_conn_free(&smc->conn);
		release_clcsock = true;
		break;
	case SMC_INIT:
	case SMC_PEERABORTWAIT:
	case SMC_CLOSED:
		break;
	}

	sock_set_flag(sk, SOCK_DEAD);
	sk->sk_state_change(sk);

	/* release the clcsock with the sock lock dropped */
	if (release_clcsock) {
		release_sock(sk);
		smc_clcsock_release(smc);
		lock_sock(sk);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) static inline bool smc_close_sent_any_close(struct smc_connection *conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) return conn->local_tx_ctrl.conn_state_flags.peer_conn_abort ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) conn->local_tx_ctrl.conn_state_flags.peer_conn_closed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
/* perform an active close of the smc socket: drive the close state
 * machine according to the current state, possibly waiting for pending
 * sndbuf data first; called with the sock lock held, which is dropped
 * temporarily around work flush/cancel; returns 0 or a negative errno
 */
int smc_close_active(struct smc_sock *smc)
{
	struct smc_cdc_conn_state_flags *txflags =
		&smc->conn.local_tx_ctrl.conn_state_flags;
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	int old_state;
	long timeout;
	int rc = 0;
	int rc1 = 0;

	/* exiting tasks don't wait; SO_LINGER uses the configured linger
	 * time, otherwise the default stream-wait timeout applies
	 */
	timeout = current->flags & PF_EXITING ?
		  0 : sock_flag(sk, SOCK_LINGER) ?
		      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;

	old_state = sk->sk_state;
again:
	switch (sk->sk_state) {
	case SMC_INIT:
		/* never connected - nothing to tell the peer */
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_LISTEN:
		sk->sk_state = SMC_CLOSED;
		sk->sk_state_change(sk); /* wake up accept */
		if (smc->clcsock && smc->clcsock->sk) {
			/* restore the original data_ready callback before
			 * shutting the listen clcsock down
			 */
			smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
			smc->clcsock->sk->sk_user_data = NULL;
			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
		}
		smc_close_cleanup_listen(sk);
		/* drop the lock so the listen work can finish */
		release_sock(sk);
		flush_work(&smc->tcp_listen_work);
		lock_sock(sk);
		break;
	case SMC_ACTIVE:
		/* give pending sndbuf data a chance to be sent */
		smc_close_stream_wait(smc, timeout);
		release_sock(sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);
		if (sk->sk_state == SMC_ACTIVE) {
			/* send close request */
			rc = smc_close_final(conn);
			sk->sk_state = SMC_PEERCLOSEWAIT1;

			/* actively shutdown clcsock before peer close it,
			 * prevent peer from entering TIME_WAIT state.
			 */
			if (smc->clcsock && smc->clcsock->sk) {
				rc1 = kernel_sock_shutdown(smc->clcsock,
							   SHUT_RDWR);
				/* keep the first error that occurred */
				rc = rc ? rc : rc1;
			}
		} else {
			/* peer event has changed the state */
			goto again;
		}
		break;
	case SMC_APPFINCLOSEWAIT:
		/* socket already shutdown wr or both (active close) */
		if (txflags->peer_done_writing &&
		    !smc_close_sent_any_close(conn)) {
			/* just shutdown wr done, send close request */
			rc = smc_close_final(conn);
		}
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_APPCLOSEWAIT1:
	case SMC_APPCLOSEWAIT2:
		if (!smc_cdc_rxed_any_close(conn))
			smc_close_stream_wait(smc, timeout);
		release_sock(sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);
		/* state may have changed while the lock was dropped */
		if (sk->sk_state != SMC_APPCLOSEWAIT1 &&
		    sk->sk_state != SMC_APPCLOSEWAIT2)
			goto again;
		/* confirm close from peer */
		rc = smc_close_final(conn);
		if (smc_cdc_rxed_any_close(conn)) {
			/* peer has closed the socket already */
			sk->sk_state = SMC_CLOSED;
			sock_put(sk); /* postponed passive closing */
		} else {
			/* peer has just issued a shutdown write */
			sk->sk_state = SMC_PEERFINCLOSEWAIT;
		}
		break;
	case SMC_PEERCLOSEWAIT1:
	case SMC_PEERCLOSEWAIT2:
		if (txflags->peer_done_writing &&
		    !smc_close_sent_any_close(conn)) {
			/* just shutdown wr done, send close request */
			rc = smc_close_final(conn);
		}
		/* peer sending PeerConnectionClosed will cause transition */
		break;
	case SMC_PEERFINCLOSEWAIT:
		/* peer sending PeerConnectionClosed will cause transition */
		break;
	case SMC_PROCESSABORT:
		rc = smc_close_abort(conn);
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_PEERABORTWAIT:
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_CLOSED:
		/* nothing to do, add tracing in future patch */
		break;
	}

	if (old_state != sk->sk_state)
		sk->sk_state_change(sk);
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)
/* a peer_conn_abort was received: move the socket into the matching
 * abort state and drop the "passive closing" sock reference where the
 * state transition completes the peer-initiated close
 */
static void smc_close_passive_abort_received(struct smc_sock *smc)
{
	struct smc_cdc_conn_state_flags *txflags =
		&smc->conn.local_tx_ctrl.conn_state_flags;
	struct sock *sk = &smc->sk;

	switch (sk->sk_state) {
	case SMC_INIT:
	case SMC_ACTIVE:
	case SMC_APPCLOSEWAIT1:
		sk->sk_state = SMC_PROCESSABORT;
		sock_put(sk); /* passive closing */
		break;
	case SMC_APPFINCLOSEWAIT:
		sk->sk_state = SMC_PROCESSABORT;
		break;
	case SMC_PEERCLOSEWAIT1:
	case SMC_PEERCLOSEWAIT2:
		if (txflags->peer_done_writing &&
		    !smc_close_sent_any_close(&smc->conn))
			/* just shutdown, but not yet closed locally */
			sk->sk_state = SMC_PROCESSABORT;
		else
			sk->sk_state = SMC_CLOSED;
		sock_put(sk); /* passive closing */
		break;
	case SMC_APPCLOSEWAIT2:
	case SMC_PEERFINCLOSEWAIT:
		sk->sk_state = SMC_CLOSED;
		sock_put(sk); /* passive closing */
		break;
	case SMC_PEERABORTWAIT:
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_PROCESSABORT:
		/* nothing to do, add tracing in future patch */
		break;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) /* Either some kind of closing has been received: peer_conn_closed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) * peer_conn_abort, or peer_done_writing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) * or the link group of the connection terminates abnormally.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) */
/* Either some kind of closing has been received: peer_conn_closed,
 * peer_conn_abort, or peer_done_writing
 * or the link group of the connection terminates abnormally.
 */
static void smc_close_passive_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(work,
						   struct smc_connection,
						   close_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_cdc_conn_state_flags *rxflags;
	bool release_clcsock = false;
	struct sock *sk = &smc->sk;
	int old_state;

	lock_sock(sk);
	old_state = sk->sk_state;

	rxflags = &conn->local_rx_ctrl.conn_state_flags;
	if (rxflags->peer_conn_abort) {
		/* peer has not received all data */
		smc_close_passive_abort_received(smc);
		/* drop the lock so tx_work (which may take it) can finish */
		release_sock(&smc->sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(&smc->sk);
		goto wakeup;
	}

	switch (sk->sk_state) {
	case SMC_INIT:
		sk->sk_state = SMC_APPCLOSEWAIT1;
		break;
	case SMC_ACTIVE:
		sk->sk_state = SMC_APPCLOSEWAIT1;
		/* postpone sock_put() for passive closing to cover
		 * received SEND_SHUTDOWN as well
		 */
		break;
	case SMC_PEERCLOSEWAIT1:
		if (rxflags->peer_done_writing)
			sk->sk_state = SMC_PEERCLOSEWAIT2;
		fallthrough;
		/* to check for closing */
	case SMC_PEERCLOSEWAIT2:
		if (!smc_cdc_rxed_any_close(conn))
			break;
		if (sock_flag(sk, SOCK_DEAD) &&
		    smc_close_sent_any_close(conn)) {
			/* smc_release has already been called locally */
			sk->sk_state = SMC_CLOSED;
		} else {
			/* just shutdown, but not yet closed locally */
			sk->sk_state = SMC_APPFINCLOSEWAIT;
		}
		sock_put(sk); /* passive closing */
		break;
	case SMC_PEERFINCLOSEWAIT:
		if (smc_cdc_rxed_any_close(conn)) {
			sk->sk_state = SMC_CLOSED;
			sock_put(sk); /* passive closing */
		}
		break;
	case SMC_APPCLOSEWAIT1:
	case SMC_APPCLOSEWAIT2:
		/* postpone sock_put() for passive closing to cover
		 * received SEND_SHUTDOWN as well
		 */
		break;
	case SMC_APPFINCLOSEWAIT:
	case SMC_PEERABORTWAIT:
	case SMC_PROCESSABORT:
	case SMC_CLOSED:
		/* nothing to do, add tracing in future patch */
		break;
	}

wakeup:
	sk->sk_data_ready(sk); /* wakeup blocked rcvbuf consumers */
	sk->sk_write_space(sk); /* wakeup blocked sndbuf producers */

	if (old_state != sk->sk_state) {
		sk->sk_state_change(sk);
		if ((sk->sk_state == SMC_CLOSED) &&
		    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
			/* fully closed and no user left - free the
			 * connection; the clcsock is released below,
			 * outside the sock lock
			 */
			smc_conn_free(conn);
			if (smc->clcsock)
				release_clcsock = true;
		}
	}
	release_sock(sk);
	if (release_clcsock)
		smc_clcsock_release(smc);
	sock_put(sk); /* sock_hold done by schedulers of close_work */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
/* shutdown the write side of the smc socket: wait for pending sndbuf
 * data, then send a "done writing" indication to the peer; called with
 * the sock lock held; returns 0 or a negative errno
 */
int smc_close_shutdown_write(struct smc_sock *smc)
{
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	int old_state;
	long timeout;
	int rc = 0;

	/* exiting tasks don't wait; SO_LINGER uses the configured linger
	 * time, otherwise the default stream-wait timeout applies
	 */
	timeout = current->flags & PF_EXITING ?
		  0 : sock_flag(sk, SOCK_LINGER) ?
		      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;

	old_state = sk->sk_state;
again:
	switch (sk->sk_state) {
	case SMC_ACTIVE:
		smc_close_stream_wait(smc, timeout);
		release_sock(sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);
		/* state may have changed while the lock was dropped */
		if (sk->sk_state != SMC_ACTIVE)
			goto again;
		/* send close wr request */
		rc = smc_close_wr(conn);
		sk->sk_state = SMC_PEERCLOSEWAIT1;
		break;
	case SMC_APPCLOSEWAIT1:
		/* passive close */
		if (!smc_cdc_rxed_any_close(conn))
			smc_close_stream_wait(smc, timeout);
		release_sock(sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);
		if (sk->sk_state != SMC_APPCLOSEWAIT1)
			goto again;
		/* confirm close from peer */
		rc = smc_close_wr(conn);
		sk->sk_state = SMC_APPCLOSEWAIT2;
		break;
	case SMC_APPCLOSEWAIT2:
	case SMC_PEERFINCLOSEWAIT:
	case SMC_PEERCLOSEWAIT1:
	case SMC_PEERCLOSEWAIT2:
	case SMC_APPFINCLOSEWAIT:
	case SMC_PROCESSABORT:
	case SMC_PEERABORTWAIT:
		/* nothing to do, add tracing in future patch */
		break;
	}

	if (old_state != sk->sk_state)
		sk->sk_state_change(sk);
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) /* Initialize close properties on connection establishment. */
/* Initialize close properties on connection establishment. */
void smc_close_init(struct smc_sock *smc)
{
	/* close_work handles closings received from the peer as well as
	 * abnormal link group termination (see smc_close_passive_work)
	 */
	INIT_WORK(&smc->conn.close_work, smc_close_passive_work);
}