// SPDX-License-Identifier: GPL-2.0-or-later
/* Service connection management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include "ar-internal.h"

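/* A shared placeholder bundle for service connections.  Service-side
 * connections take no part in client bundle management, but pointing them at
 * a pinned dummy bundle means conn->bundle is never NULL.
 */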
static struct rxrpc_bundle rxrpc_service_dummy_bundle = {
	.usage		= ATOMIC_INIT(1),
	.debug_id	= UINT_MAX,
	.channel_lock	= __SPIN_LOCK_UNLOCKED(&rxrpc_service_dummy_bundle.channel_lock),
};

/*
 * Find a service connection under RCU conditions.
 *
 * We could use a hash table, but that is subject to bucket stuffing by an
 * attacker as the client gets to pick the epoch and cid values and would know
 * the hash function.  So, instead, we use a hash table for the peer and from
 * that an rbtree to find the service connection.  Under ordinary circumstances
 * it might be slower than a large hash table, but it is at least limited in
 * depth.
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
						     struct sk_buff *skb)
{
	struct rxrpc_connection *conn = NULL;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p;
	unsigned int seq = 0;

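	/* The lookup key is the client-chosen {epoch, connection ID} pair.
	 * The channel number occupies the bottom bits of the CID and is
	 * masked off so that all calls on a connection share one tree entry.
	 */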
	k.epoch	= sp->hdr.epoch;
	k.cid	= sp->hdr.cid & RXRPC_CIDMASK;

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		read_seqbegin_or_lock(&peer->service_conn_lock, &seq);

		p = rcu_dereference_raw(peer->service_conns.rb_node);
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, service_node);

			if (conn->proto.index_key < k.index_key)
				p = rcu_dereference_raw(p->rb_left);
			else if (conn->proto.index_key > k.index_key)
				p = rcu_dereference_raw(p->rb_right);
			else
				break;
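			/* No match at this node: clear conn so that a failed
			 * walk drops out of the loop returning NULL.
			 */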
			conn = NULL;
		}
	} while (need_seqretry(&peer->service_conn_lock, seq));

	done_seqretry(&peer->service_conn_lock, seq);
	_leave(" = %d", conn ? conn->debug_id : -1);
	return conn;
}

/*
 * Insert a service connection into a peer's tree, thereby making it a target
 * for incoming packets.
 */
static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
				       struct rxrpc_connection *conn)
{
	struct rxrpc_connection *cursor = NULL;
	struct rxrpc_conn_proto k = conn->proto;
	struct rb_node **pp, *parent;

	write_seqlock_bh(&peer->service_conn_lock);

	pp = &peer->service_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent,
				  struct rxrpc_connection, service_node);

		if (cursor->proto.index_key < k.index_key)
			pp = &(*pp)->rb_left;
		else if (cursor->proto.index_key > k.index_key)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_conn;
	}

	rb_link_node_rcu(&conn->service_node, parent, pp);
	rb_insert_color(&conn->service_node, &peer->service_conns);
conn_published:
	set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
	write_sequnlock_bh(&peer->service_conn_lock);
	_leave(" = %d [new]", conn->debug_id);
	return;

found_extant_conn:
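	/* A usage count of zero means the old connection is defunct and is
	 * merely awaiting the reaper, so it may be displaced in the tree.
	 */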
	if (atomic_read(&cursor->usage) == 0)
		goto replace_old_connection;
	write_sequnlock_bh(&peer->service_conn_lock);
	/* We should not be able to get here.  rxrpc_new_incoming_connection()
	 * is called in a non-reentrant context, so there can't be a race to
	 * insert a new connection.
	 */
	BUG();

replace_old_connection:
	/* The old connection is from an outdated epoch. */
	_debug("replace conn");
	rb_replace_node_rcu(&cursor->service_node,
			    &conn->service_node,
			    &peer->service_conns);
	clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &cursor->flags);
	goto conn_published;
}

/*
 * Preallocate a service connection.  The connection is placed on the proc and
 * reap lists so that we don't have to get the lock from BH context.
 */
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxnet,
							   gfp_t gfp)
{
	struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp);

	if (conn) {
		/* We maintain an extra ref on the connection whilst it is on
		 * the rxrpc_connections list.
		 */
		conn->state = RXRPC_CONN_SERVICE_PREALLOC;
		atomic_set(&conn->usage, 2);
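		/* Attach the shared dummy bundle so that code that expects
		 * conn->bundle to be set doesn't have to special-case service
		 * connections.
		 */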
		conn->bundle = rxrpc_get_bundle(&rxrpc_service_dummy_bundle);

		atomic_inc(&rxnet->nr_conns);
		write_lock(&rxnet->conn_lock);
		list_add_tail(&conn->link, &rxnet->service_conns);
		list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
		write_unlock(&rxnet->conn_lock);

		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
				 atomic_read(&conn->usage),
				 __builtin_return_address(0));
	}

	return conn;
}

/*
 * Set up an incoming connection.  This is called in BH context with the RCU
 * read lock held.
 */
void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
				   struct rxrpc_connection *conn,
				   const struct rxrpc_security *sec,
				   struct key *key,
				   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	_enter("");

	conn->proto.epoch	= sp->hdr.epoch;
	conn->proto.cid		= sp->hdr.cid & RXRPC_CIDMASK;
	conn->params.service_id	= sp->hdr.serviceId;
	conn->service_id	= sp->hdr.serviceId;
	conn->security_ix	= sp->hdr.securityIndex;
	conn->out_clientflag	= 0;
	conn->security		= sec;
	conn->server_key	= key_get(key);
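	/* A non-zero security index means a security exchange (challenge/
	 * response) must complete before the connection is fully usable.
	 */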
	if (conn->security_ix)
		conn->state = RXRPC_CONN_SERVICE_UNSECURED;
	else
		conn->state = RXRPC_CONN_SERVICE;

	/* See if we should upgrade the service.  This can only happen on the
	 * first packet on a new connection.  Once done, it applies to all
	 * subsequent calls on that connection.
	 */
	if (sp->hdr.userStatus == RXRPC_USERSTATUS_SERVICE_UPGRADE &&
	    conn->service_id == rx->service_upgrade.from)
		conn->service_id = rx->service_upgrade.to;

	/* Make the connection a target for incoming packets. */
	rxrpc_publish_service_conn(conn->params.peer, conn);

	_net("CONNECTION new %d {%x}", conn->debug_id, conn->proto.cid);
}

/*
 * Remove the service connection from the peer's tree, thereby removing it as a
 * target for incoming packets.
 */
void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer = conn->params.peer;

	write_seqlock_bh(&peer->service_conn_lock);
	if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags))
		rb_erase(&conn->service_node, &peer->service_conns);
	write_sequnlock_bh(&peer->service_conn_lock);
}