Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/* Client connection-specific management code.
 *
 * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * There is one flag of relevance to the cache:
 *
 *  (1) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused.  This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations.  If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"

__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;

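/* With the defaults above, an idle client connection is normally reaped
 * after 2 * 60 * HZ jiffies (two minutes); once the number of client
 * connections exceeds the reap threshold of 900, the fast expiry of 2 * HZ
 * jiffies (two seconds) applies instead, so the cache shrinks quickly under
 * pressure.
 */
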
/*
 * We use machine-unique IDs for our client connections.
 */
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);

/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.  The
 * epoch doesn't change until the client is rebooted (or, at least, unless the
 * module is unloaded).
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id;

	_enter("");

	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);

	id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
			      1, 0x40000000, GFP_NOWAIT);
	if (id < 0)
		goto error;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = rxnet->epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x]", conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}

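/* A note on the CID layout implied above, assuming RXRPC_CIDSHIFT is
 * ilog2(RXRPC_MAXCALLS) = 2 as defined elsewhere in this tree: the IDR
 * value fills the upper bits of the 32-bit CID and the low two bits select
 * one of the four channels on the connection.  Capping allocation at
 * 0x40000000 keeps the largest CID at 0x3fffffff << 2 = 0xfffffffc, which
 * still fits in a u32.
 */
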
/*
 * Release a connection ID for a client connection from the global pool.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
		spin_lock(&rxrpc_conn_id_lock);
		idr_remove(&rxrpc_client_conn_ids,
			   conn->proto.cid >> RXRPC_CIDSHIFT);
		spin_unlock(&rxrpc_conn_id_lock);
	}
}

/*
 * Destroy the client connection ID tree.
 */
void rxrpc_destroy_client_conn_ids(void)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&rxrpc_client_conn_ids)) {
		idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, atomic_read(&conn->usage));
		}
		BUG();
	}

	idr_destroy(&rxrpc_client_conn_ids);
}

/*
 * Allocate a connection bundle.
 */
static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp,
					       gfp_t gfp)
{
	struct rxrpc_bundle *bundle;

	bundle = kzalloc(sizeof(*bundle), gfp);
	if (bundle) {
		bundle->params = *cp;
		rxrpc_get_peer(bundle->params.peer);
		atomic_set(&bundle->usage, 1);
		spin_lock_init(&bundle->channel_lock);
		INIT_LIST_HEAD(&bundle->waiting_calls);
	}
	return bundle;
}

struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle)
{
	atomic_inc(&bundle->usage);
	return bundle;
}

static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
{
	rxrpc_put_peer(bundle->params.peer);
	kfree(bundle);
}

void rxrpc_put_bundle(struct rxrpc_bundle *bundle)
{
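	/* Snapshot the debug ID before dropping the reference: if the
	 * decrement does not take the count to zero, another thread may free
	 * the bundle at any moment, so it must not be dereferenced again.
	 */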
	unsigned int d = bundle->debug_id;
	unsigned int u = atomic_dec_return(&bundle->usage);

	_debug("PUT B=%x %u", d, u);
	if (u == 0)
		rxrpc_free_bundle(bundle);
}

/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = bundle->params.local->rxnet;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&conn->usage, 1);
	conn->bundle		= bundle;
	conn->params		= bundle->params;
	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
	conn->state		= RXRPC_CONN_CLIENT;
	conn->service_id	= conn->params.service_id;

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	atomic_inc(&rxnet->nr_conns);
	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	rxrpc_get_bundle(bundle);
	rxrpc_get_peer(conn->params.peer);
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client,
			 atomic_read(&conn->usage),
			 __builtin_return_address(0));

	atomic_inc(&rxnet->nr_client_conns);
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	_leave(" = %p", conn);
	return conn;

error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet;
	int id_cursor, id, distance, limit;

	if (!conn)
		goto dont_reuse;

	rxnet = conn->params.local->rxnet;
	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if (conn->state != RXRPC_CONN_CLIENT ||
	    conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}
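
/* A worked example of the distance check above: with 300 client conns in
 * existence, limit = max(300 * 4, 1024) = 1200, so a connection whose IDR
 * slot lies more than 1200 entries from the allocation cursor is marked
 * DONT_REUSE and replaced rather than reused.
 */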

/*
 * Look up the conn bundle that matches the connection parameters, adding it if
 * it doesn't yet exist.
 */
static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *cp,
						 gfp_t gfp)
{
	static atomic_t rxrpc_bundle_id;
	struct rxrpc_bundle *bundle, *candidate;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;

	_enter("{%px,%x,%u,%u}",
	       cp->peer, key_serial(cp->key), cp->security_level, cp->upgrade);

	if (cp->exclusive)
		return rxrpc_alloc_bundle(cp, gfp);

	/* First, see if the bundle is already there. */
	_debug("search 1");
	spin_lock(&local->client_bundles_lock);
	p = local->client_bundles.rb_node;
	while (p) {
		bundle = rb_entry(p, struct rxrpc_bundle, local_node);

#define cmp(X) ((long)bundle->params.X - (long)cp->X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level) ?:
			cmp(upgrade));
#undef cmp
		if (diff < 0)
			p = p->rb_left;
		else if (diff > 0)
			p = p->rb_right;
		else
			goto found_bundle;
	}
	spin_unlock(&local->client_bundles_lock);
	_debug("not found");

	/* It wasn't.  We need to add one. */
	candidate = rxrpc_alloc_bundle(cp, gfp);
	if (!candidate)
		return NULL;

	_debug("search 2");
	spin_lock(&local->client_bundles_lock);
	pp = &local->client_bundles.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		bundle = rb_entry(parent, struct rxrpc_bundle, local_node);

#define cmp(X) ((long)bundle->params.X - (long)cp->X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level) ?:
			cmp(upgrade));
#undef cmp
		if (diff < 0)
			pp = &(*pp)->rb_left;
		else if (diff > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_bundle_free;
	}

	_debug("new bundle");
	candidate->debug_id = atomic_inc_return(&rxrpc_bundle_id);
	rb_link_node(&candidate->local_node, parent, pp);
	rb_insert_color(&candidate->local_node, &local->client_bundles);
	rxrpc_get_bundle(candidate);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = %u [new]", candidate->debug_id);
	return candidate;

found_bundle_free:
	rxrpc_free_bundle(candidate);
found_bundle:
	rxrpc_get_bundle(bundle);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = %u [found]", bundle->debug_id);
	return bundle;
}
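
/* The two walks above follow the usual unlock/allocate/re-search pattern:
 * the first search runs without an allocation in hand so the lock is never
 * held across kzalloc(); if nothing is found, a candidate is built outside
 * the lock and the tree is searched again under it, since another thread
 * may have inserted an equivalent bundle in the interim.  The GNU ?: chains
 * in the cmp() macro yield a lexicographic ordering over (peer, key,
 * security_level, upgrade).
 */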

/*
 * Create or find a client bundle to use for a call.
 *
 * If we return with a connection, the call will be on its waiting list.  It's
 * left to the caller to assign a channel and wake up the call.
 */
static struct rxrpc_bundle *rxrpc_prep_call(struct rxrpc_sock *rx,
					    struct rxrpc_call *call,
					    struct rxrpc_conn_parameters *cp,
					    struct sockaddr_rxrpc *srx,
					    gfp_t gfp)
{
	struct rxrpc_bundle *bundle;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
	if (!cp->peer)
		goto error;

	call->cong_cwnd = cp->peer->cong_cwnd;
	if (call->cong_cwnd >= call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
	else
		call->cong_mode = RXRPC_CALL_SLOW_START;
	if (cp->upgrade)
		__set_bit(RXRPC_CALL_UPGRADE, &call->flags);

	/* Find the client connection bundle. */
	bundle = rxrpc_look_up_bundle(cp, gfp);
	if (!bundle)
		goto error;

	/* Get this call queued.  Someone else may activate it whilst we're
	 * lining up a new connection, but that's fine.
	 */
	spin_lock(&bundle->channel_lock);
	list_add_tail(&call->chan_wait_link, &bundle->waiting_calls);
	spin_unlock(&bundle->channel_lock);

	_leave(" = [B=%x]", bundle->debug_id);
	return bundle;

error:
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}

/*
 * Allocate a new connection and add it into a bundle.
 */
static void rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, gfp_t gfp)
	__releases(bundle->channel_lock)
{
	struct rxrpc_connection *candidate = NULL, *old = NULL;
	bool conflict;
	int i;

	_enter("");

	conflict = bundle->alloc_conn;
	if (!conflict)
		bundle->alloc_conn = true;
	spin_unlock(&bundle->channel_lock);
	if (conflict) {
		_leave(" [conf]");
		return;
	}

	candidate = rxrpc_alloc_client_connection(bundle, gfp);

	spin_lock(&bundle->channel_lock);
	bundle->alloc_conn = false;

	if (IS_ERR(candidate)) {
		bundle->alloc_error = PTR_ERR(candidate);
		spin_unlock(&bundle->channel_lock);
		_leave(" [err %ld]", PTR_ERR(candidate));
		return;
	}

	bundle->alloc_error = 0;

	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
		unsigned int shift = i * RXRPC_MAXCALLS;
		int j;

		old = bundle->conns[i];
		if (!rxrpc_may_reuse_conn(old)) {
			if (old)
				trace_rxrpc_client(old, -1, rxrpc_client_replace);
			candidate->bundle_shift = shift;
			bundle->conns[i] = candidate;
			for (j = 0; j < RXRPC_MAXCALLS; j++)
				set_bit(shift + j, &bundle->avail_chans);
			candidate = NULL;
			break;
		}

		old = NULL;
	}

	spin_unlock(&bundle->channel_lock);

	if (candidate) {
		_debug("discard C=%x", candidate->debug_id);
		trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
		rxrpc_put_connection(candidate);
	}

	rxrpc_put_connection(old);
	_leave("");
}
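
/* bundle->alloc_conn serves as a single-allocator gate for the function
 * above: the first caller to find it clear sets it and drops the channel
 * lock around the (potentially sleeping) allocation, while concurrent
 * callers see the conflict and back off instead of allocating duplicate
 * connections for the same bundle.
 */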

/*
 * Add a connection to a bundle if there are no usable connections or we have
 * connections waiting for extra capacity.
 */
static void rxrpc_maybe_add_conn(struct rxrpc_bundle *bundle, gfp_t gfp)
{
	struct rxrpc_call *call;
	int i, usable;

	_enter("");

	spin_lock(&bundle->channel_lock);

	/* See if there are any usable connections. */
	usable = 0;
	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++)
		if (rxrpc_may_reuse_conn(bundle->conns[i]))
			usable++;

	if (!usable && !list_empty(&bundle->waiting_calls)) {
		call = list_first_entry(&bundle->waiting_calls,
					struct rxrpc_call, chan_wait_link);
		if (test_bit(RXRPC_CALL_UPGRADE, &call->flags))
			bundle->try_upgrade = true;
	}

	if (!usable)
		goto alloc_conn;

	if (!bundle->avail_chans &&
	    !bundle->try_upgrade &&
	    !list_empty(&bundle->waiting_calls) &&
	    usable < ARRAY_SIZE(bundle->conns))
		goto alloc_conn;

	spin_unlock(&bundle->channel_lock);
	_leave("");
	return;

alloc_conn:
	return rxrpc_add_conn_to_bundle(bundle, gfp);
}

/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_bundle *bundle = conn->bundle;
	struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
					     struct rxrpc_call, chan_wait_link);
	u32 call_id = chan->call_counter + 1;

	_enter("C=%x,%u", conn->debug_id, channel);

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	/* Cancel the final ACK on the previous call if it hasn't been sent yet
	 * as the DATA packet will implicitly ACK it.
	 */
	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
	clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);

	rxrpc_see_call(call);
	list_del_init(&call->chan_wait_link);
	call->peer	= rxrpc_get_peer(conn->params.peer);
	call->conn	= rxrpc_get_connection(conn);
	call->cid	= conn->proto.cid | channel;
	call->call_id	= call_id;
	call->security	= conn->security;
	call->security_ix = conn->security_ix;
	call->service_id = conn->service_id;

	trace_rxrpc_connect_call(call);
	_net("CONNECT call %08x:%08x as call %d on conn %d",
	     call->cid, call->call_id, call->debug_id, conn->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
	write_unlock_bh(&call->state_lock);

	/* Paired with the read barrier in rxrpc_connect_call().  This orders
	 * cid and epoch in the connection wrt to call_id without the need to
	 * take the channel_lock.
	 *
	 * We provisionally assign a callNumber at this point, but we don't
	 * confirm it until the call is about to be exposed.
	 *
	 * TODO: Pair with a barrier in the data_ready handler when that looks
	 * at the call ID through a connection channel.
	 */
	smp_wmb();

	chan->call_id		= call_id;
	chan->call_debug_id	= call->debug_id;
	rcu_assign_pointer(chan->call, call);
	wake_up(&call->waitq);
}

/*
 * Remove a connection from the idle list if it's on it.
 */
static void rxrpc_unidle_conn(struct rxrpc_bundle *bundle, struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = bundle->params.local->rxnet;
	bool drop_ref;

	if (!list_empty(&conn->cache_link)) {
		drop_ref = false;
		spin_lock(&rxnet->client_conn_cache_lock);
		if (!list_empty(&conn->cache_link)) {
			list_del_init(&conn->cache_link);
			drop_ref = true;
		}
		spin_unlock(&rxnet->client_conn_cache_lock);
		if (drop_ref)
			rxrpc_put_connection(conn);
	}
}

/*
 * Assign channels and callNumbers to waiting calls with channel_lock
 * held by caller.
 */
static void rxrpc_activate_channels_locked(struct rxrpc_bundle *bundle)
{
	struct rxrpc_connection *conn;
	unsigned long avail, mask;
	unsigned int channel, slot;

	if (bundle->try_upgrade)
		mask = 1;
	else
		mask = ULONG_MAX;

	while (!list_empty(&bundle->waiting_calls)) {
		avail = bundle->avail_chans & mask;
		if (!avail)
			break;
		channel = __ffs(avail);
		clear_bit(channel, &bundle->avail_chans);

		slot = channel / RXRPC_MAXCALLS;
		conn = bundle->conns[slot];
		if (!conn)
			break;

		if (bundle->try_upgrade)
			set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
		rxrpc_unidle_conn(bundle, conn);

		channel &= (RXRPC_MAXCALLS - 1);
		conn->act_chans	|= 1 << channel;
		rxrpc_activate_one_channel(conn, channel);
	}
}
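
/* Worked example of the bitmap arithmetic above, assuming RXRPC_MAXCALLS
 * is 4: avail_chans packs four channel bits per connection slot, so if
 * __ffs() returns bit 6, then slot = 6 / 4 = 1 selects bundle->conns[1]
 * and 6 & 3 = 2 selects channel 2 on it.  While try_upgrade is set, mask
 * is 1, restricting activation to bit 0 (channel 0 of the first slot) so
 * that only one call probes for a service upgrade at a time.
 */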

/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
{
	_enter("B=%x", bundle->debug_id);

	trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);

	if (!bundle->avail_chans)
		return;

	spin_lock(&bundle->channel_lock);
	rxrpc_activate_channels_locked(bundle);
	spin_unlock(&bundle->channel_lock);
	_leave("");
}

/*
 * Wait for a callNumber and a channel to be granted to a call.
 */
static int rxrpc_wait_for_channel(struct rxrpc_bundle *bundle,
				  struct rxrpc_call *call, gfp_t gfp)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret = 0;

	_enter("%d", call->debug_id);

	if (!gfpflags_allow_blocking(gfp)) {
		rxrpc_maybe_add_conn(bundle, gfp);
		rxrpc_activate_channels(bundle);
		ret = bundle->alloc_error ?: -EAGAIN;
		goto out;
	}

	add_wait_queue_exclusive(&call->waitq, &myself);
	for (;;) {
		rxrpc_maybe_add_conn(bundle, gfp);
		rxrpc_activate_channels(bundle);
		ret = bundle->alloc_error;
		if (ret < 0)
			break;

		switch (call->interruptibility) {
		case RXRPC_INTERRUPTIBLE:
		case RXRPC_PREINTERRUPTIBLE:
			set_current_state(TASK_INTERRUPTIBLE);
			break;
		case RXRPC_UNINTERRUPTIBLE:
		default:
			set_current_state(TASK_UNINTERRUPTIBLE);
			break;
		}
		if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_AWAIT_CONN)
			break;
		if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
		     call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
		    signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	remove_wait_queue(&call->waitq, &myself);
	__set_current_state(TASK_RUNNING);

out:
	_leave(" = %d", ret);
	return ret;
}
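
/* Note the ordering in the wait loop above: the task state is set before
 * call->state is re-checked, so a wake-up from rxrpc_activate_one_channel()
 * that arrives between the check and schedule() leaves the task runnable
 * and the loop re-tests on the next pass.  With a non-blocking gfp, one
 * pass is made and bundle->alloc_error or -EAGAIN is returned; the caller
 * then re-checks the call state to see whether a channel was granted.
 */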

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_sock *rx,
		       struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	struct rxrpc_bundle *bundle;
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret = 0;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);

	bundle = rxrpc_prep_call(rx, call, cp, srx, gfp);
	if (IS_ERR(bundle)) {
		ret = PTR_ERR(bundle);
		goto out;
	}

	if (call->state == RXRPC_CALL_CLIENT_AWAIT_CONN) {
		ret = rxrpc_wait_for_channel(bundle, call, gfp);
		if (ret < 0)
			goto wait_failed;
	}

granted_channel:
	/* Paired with the write barrier in rxrpc_activate_one_channel(). */
	smp_rmb();

out_put_bundle:
	rxrpc_put_bundle(bundle);
out:
	_leave(" = %d", ret);
	return ret;

wait_failed:
	spin_lock(&bundle->channel_lock);
	list_del_init(&call->chan_wait_link);
	spin_unlock(&bundle->channel_lock);

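	/* The wait may have raced with a channel grant: if the call is no
	 * longer in the AWAIT_CONN state by the time it is off the waiting
	 * list, the grant won and the error from the wait is discarded.
	 */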
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	if (call->state != RXRPC_CALL_CLIENT_AWAIT_CONN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		goto granted_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	rxrpc_disconnect_client_call(bundle, call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	goto out_put_bundle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753)  * Note that a call, and thus a connection, is about to be exposed to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754)  * world.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) void rxrpc_expose_client_call(struct rxrpc_call *call)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	struct rxrpc_connection *conn = call->conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	struct rxrpc_channel *chan = &conn->channels[channel];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		/* Mark the call ID as being used.  If the callNumber counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		 * exceeds ~2 billion, we kill the connection after its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		 * outstanding calls have finished so that the counter doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		 * wrap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		chan->call_counter++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		if (chan->call_counter >= INT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) }

/*
 * Set the reap timer.
 */
static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
{
	if (!rxnet->kill_all_client_conns) {
		unsigned long now = jiffies;
		unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;

		if (rxnet->live)
			timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
	}
}
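
/*
 * A worked example of the arithmetic above, assuming HZ == 250 and a
 * two-minute idle expiry: reap_at lands 30000 jiffies past "now".
 * timer_reduce() only ever moves a timer earlier, so a reap already
 * scheduled sooner than reap_at is left alone.
 */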

/*
 * Disconnect a client call.
 */
void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call *call)
{
	struct rxrpc_connection *conn;
	struct rxrpc_channel *chan = NULL;
	struct rxrpc_net *rxnet = bundle->params.local->rxnet;
	unsigned int channel;
	bool may_reuse;
	u32 cid;

	_enter("c=%x", call->debug_id);

	spin_lock(&bundle->channel_lock);
	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.
	 */
	conn = call->conn;
	if (!conn) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		list_del_init(&call->chan_wait_link);
		goto out;
	}

	cid = call->cid;
	channel = cid & RXRPC_CHANNELMASK;
	chan = &conn->channels[channel];
	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);

	if (rcu_access_pointer(chan->call) != call) {
		spin_unlock(&bundle->channel_lock);
		BUG();
	}

	may_reuse = rxrpc_may_reuse_conn(conn);

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);

		if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
			bundle->try_upgrade = false;
			if (may_reuse)
				rxrpc_activate_channels_locked(bundle);
		}
	}

	/* See if we can pass the channel directly to another call. */
	if (may_reuse && !list_empty(&bundle->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out;
	}

	/* Schedule the final ACK to be transmitted in a short while so that it
	 * can be skipped if we find a follow-on call.  The first DATA packet
	 * of the follow-on call will implicitly ACK this call.
	 */
	if (call->completion == RXRPC_CALL_SUCCEEDED &&
	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		unsigned long final_ack_at = jiffies + 2;

		WRITE_ONCE(chan->final_ack_at, final_ack_at);
		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
		rxrpc_reduce_conn_timer(conn, final_ack_at);
	}

	/* Deactivate the channel. */
	rcu_assign_pointer(chan->call, NULL);
	set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
	conn->act_chans &= ~(1 << channel);

	/* If no channels remain active, then put the connection on the idle
	 * list for a short while.  Give it a ref to stop it going away if it
	 * becomes unbundled.
	 */
	if (!conn->act_chans) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;

		rxrpc_get_connection(conn);
		spin_lock(&rxnet->client_conn_cache_lock);
		list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
		spin_unlock(&rxnet->client_conn_cache_lock);

		rxrpc_set_client_reap_timer(rxnet);
	}

out:
	spin_unlock(&bundle->channel_lock);
	_leave("");
	return;
}
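
/*
 * A concrete trace of the channel bookkeeping above, assuming
 * RXRPC_MAXCALLS == 4: dropping channel 2 of the connection occupying
 * bundle slot 1 (bundle_shift == 4) clears bit 2 in conn->act_chans and
 * sets bit 6 in the bundle's avail_chans mask, making that channel
 * available to the next waiting call.
 */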

/*
 * Remove a connection from a bundle.
 */
static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_bundle *bundle = conn->bundle;
	struct rxrpc_local *local = bundle->params.local;
	unsigned int bindex;
	bool need_drop = false, need_put = false;
	int i;

	_enter("C=%x", conn->debug_id);

	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
		rxrpc_process_delayed_final_acks(conn, true);

	spin_lock(&bundle->channel_lock);
	bindex = conn->bundle_shift / RXRPC_MAXCALLS;
	if (bundle->conns[bindex] == conn) {
		_debug("clear slot %u", bindex);
		bundle->conns[bindex] = NULL;
		for (i = 0; i < RXRPC_MAXCALLS; i++)
			clear_bit(conn->bundle_shift + i, &bundle->avail_chans);
		need_drop = true;
	}
	spin_unlock(&bundle->channel_lock);

	/* If there are no more connections, remove the bundle */
	if (!bundle->avail_chans) {
		_debug("maybe unbundle");
		spin_lock(&local->client_bundles_lock);

		for (i = 0; i < ARRAY_SIZE(bundle->conns); i++)
			if (bundle->conns[i])
				break;
		if (i == ARRAY_SIZE(bundle->conns) && !bundle->params.exclusive) {
			_debug("erase bundle");
			rb_erase(&bundle->local_node, &local->client_bundles);
			need_put = true;
		}

		spin_unlock(&local->client_bundles_lock);
		if (need_put)
			rxrpc_put_bundle(bundle);
	}

	if (need_drop)
		rxrpc_put_connection(conn);
	_leave("");
}
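
/*
 * The slot arithmetic above, spelt out assuming RXRPC_MAXCALLS == 4: a
 * connection's bundle_shift is four times its slot index, so
 *
 *	bindex = conn->bundle_shift / RXRPC_MAXCALLS;
 *
 * maps shifts 0, 4, 8 and 12 back to bundle->conns[] slots 0-3, and the
 * clear_bit() loop strips that connection's four channel bits out of
 * bundle->avail_chans.
 */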

/*
 * Clean up a dead client connection.
 */
static void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_local *local = conn->params.local;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("C=%x", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
	atomic_dec(&rxnet->nr_client_conns);

	rxrpc_put_client_connection_id(conn);
	rxrpc_kill_connection(conn);
}
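
/*
 * Note that by the time the kill runs the last ref is gone, so the conn can
 * no longer sit on the idle list (the list holds its own ref); all that
 * remains is to drop the conn from client connection ID tracking and
 * dispose of it.
 */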

/*
 * Release a reference to a client connection, cleaning the connection up
 * once the last reference is dropped.
 */
void rxrpc_put_client_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	unsigned int debug_id = conn->debug_id;
	int n;

	n = atomic_dec_return(&conn->usage);
	trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here);
	if (n <= 0) {
		ASSERTCMP(n, >=, 0);
		rxrpc_kill_client_conn(conn);
	}
}
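
/*
 * This is the put side of the usual refcount pattern; the get side is e.g.
 * the rxrpc_get_connection() call made when a conn is parked on the idle
 * list.  atomic_dec_return() drops the ref and observes the new count in a
 * single step, so exactly one caller can see n == 0 and perform the kill,
 * while the ASSERTCMP catches a double-put driving the count negative.
 */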

/*
 * Discard expired client connections from the idle list.  Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item, so it must be
 * assumed to be reentrant (hence the trylock below).
 */
void rxrpc_discard_expired_client_conns(struct work_struct *work)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, client_conn_reaper);
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;

	_enter("");

	if (list_empty(&rxnet->idle_client_conns)) {
		_leave(" [empty]");
		return;
	}

	/* Don't double up on the discarding */
	if (!spin_trylock(&rxnet->client_conn_discard_lock)) {
		_leave(" [already]");
		return;
	}

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = atomic_read(&rxnet->nr_client_conns);

next:
	spin_lock(&rxnet->client_conn_cache_lock);

	if (list_empty(&rxnet->idle_client_conns))
		goto out;

	conn = list_entry(rxnet->idle_client_conns.next,
			  struct rxrpc_connection, cache_link);

	if (!rxnet->kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout.  We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;
		if (conn->params.local->service_closed)
			expiry = rxrpc_closed_conn_expiry * HZ;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = READ_ONCE(jiffies);
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	list_del_init(&conn->cache_link);

	spin_unlock(&rxnet->client_conn_cache_lock);

	rxrpc_unbundle_conn(conn);
	rxrpc_put_connection(conn); /* Drop the ->cache_link ref */

	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * set the reap timer for that point.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time.  We could cancel it, but
	 * then things get messier.
	 */
	_debug("not yet");
	if (!rxnet->kill_all_client_conns)
		timer_reduce(&rxnet->client_conn_reap_timer, conn_expires_at);

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
	spin_unlock(&rxnet->client_conn_discard_lock);
	_leave("");
}
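
/*
 * Expiry selection above, by way of example: a conn population above
 * rxrpc_reap_client_connections switches the loop from the normal idle
 * expiry to the fast one, and a closed local service uses
 * rxrpc_closed_conn_expiry * HZ instead.  The trylock on
 * client_conn_discard_lock is what makes reentrant entry safe: a second
 * caller simply leaves the sweep to the instance already running.
 */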

/*
 * Preemptively destroy all the client connection records rather than
 * waiting for them to time out.
 */
void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
{
	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);
	rxnet->kill_all_client_conns = true;
	spin_unlock(&rxnet->client_conn_cache_lock);

	del_timer_sync(&rxnet->client_conn_reap_timer);

	if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
		_debug("destroy: queue failed");

	_leave("");
}
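
/*
 * Ordering note for the teardown above: once kill_all_client_conns is set,
 * the reap timer is no longer re-armed (rxrpc_set_client_reap_timer()
 * checks the flag), del_timer_sync() waits out any timer handler still in
 * flight, and the reaper work is queued once more to sweep the idle list
 * regardless of expiry times.
 */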

/*
 * Clean up the client connections on a local endpoint.
 */
void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn, *tmp;
	struct rxrpc_net *rxnet = local->rxnet;
	LIST_HEAD(graveyard);

	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);

	list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
				 cache_link) {
		if (conn->params.local == local) {
			trace_rxrpc_client(conn, -1, rxrpc_client_discard);
			list_move(&conn->cache_link, &graveyard);
		}
	}

	spin_unlock(&rxnet->client_conn_cache_lock);

	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next,
				  struct rxrpc_connection, cache_link);
		list_del_init(&conn->cache_link);
		rxrpc_unbundle_conn(conn);
		rxrpc_put_connection(conn);
	}

	_leave(" [culled]");
}
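
/*
 * The graveyard above is the usual two-phase teardown: matching conns are
 * moved off the idle list while client_conn_cache_lock is held, then
 * unbundled and put only after the lock is dropped, so that
 * rxrpc_unbundle_conn(), which takes the bundle and local endpoint locks,
 * never runs under the cache lock.
 */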