// SPDX-License-Identifier: GPL-2.0-or-later
/* AF_RXRPC implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/key-type.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#define CREATE_TRACE_POINTS
#include "ar-internal.h"

MODULE_DESCRIPTION("RxRPC network protocol");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_RXRPC);

unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
module_param_named(debug, rxrpc_debug, uint, 0644);
MODULE_PARM_DESC(debug, "RxRPC debugging mask");

static struct proto rxrpc_proto;
static const struct proto_ops rxrpc_rpc_ops;

/* current debugging ID */
atomic_t rxrpc_debug_id;
EXPORT_SYMBOL(rxrpc_debug_id);

/* count of skbs currently in use */
atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;

struct workqueue_struct *rxrpc_workqueue;

static void rxrpc_sock_destructor(struct sock *);

/*
 * see if an RxRPC socket is currently writable
 */
static inline int rxrpc_writable(struct sock *sk)
{
	return refcount_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
}

/*
 * wait for write bufferage to become available
 */
static void rxrpc_write_space(struct sock *sk)
{
	_enter("%p", sk);
	rcu_read_lock();
	if (rxrpc_writable(sk)) {
		struct socket_wq *wq = rcu_dereference(sk->sk_wq);

		if (skwq_has_sleeper(wq))
			wake_up_interruptible(&wq->wait);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/*
 * validate an RxRPC address
 */
static int rxrpc_validate_address(struct rxrpc_sock *rx,
				  struct sockaddr_rxrpc *srx,
				  int len)
{
	unsigned int tail;

	if (len < sizeof(struct sockaddr_rxrpc))
		return -EINVAL;

	if (srx->srx_family != AF_RXRPC)
		return -EAFNOSUPPORT;

	if (srx->transport_type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	len -= offsetof(struct sockaddr_rxrpc, transport);
	if (srx->transport_len < sizeof(sa_family_t) ||
	    srx->transport_len > len)
		return -EINVAL;

	if (srx->transport.family != rx->family &&
	    srx->transport.family == AF_INET && rx->family != AF_INET6)
		return -EAFNOSUPPORT;

	switch (srx->transport.family) {
	case AF_INET:
		if (srx->transport_len < sizeof(struct sockaddr_in))
			return -EINVAL;
		tail = offsetof(struct sockaddr_rxrpc, transport.sin.__pad);
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		if (srx->transport_len < sizeof(struct sockaddr_in6))
			return -EINVAL;
		tail = offsetof(struct sockaddr_rxrpc, transport) +
			sizeof(struct sockaddr_in6);
		break;
#endif

	default:
		return -EAFNOSUPPORT;
	}

	if (tail < len)
		memset((void *)srx + tail, 0, len - tail);
	_debug("INET: %pISp", &srx->transport);
	return 0;
}
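
/*
 * Illustrative sketch (not part of the original file): a sockaddr_rxrpc that
 * would pass rxrpc_validate_address() for an IPv4 transport.  Field names
 * follow include/uapi/linux/rxrpc.h; the service ID and port are arbitrary
 * example values.
 *
 *	struct sockaddr_rxrpc srx = {
 *		.srx_family	= AF_RXRPC,
 *		.srx_service	= 52,			// example service ID
 *		.transport_type	= SOCK_DGRAM,
 *		.transport_len	= sizeof(struct sockaddr_in),
 *		.transport.sin	= {
 *			.sin_family	= AF_INET,
 *			.sin_port	= htons(7001),	// example port
 *			.sin_addr	= { .s_addr = htonl(INADDR_LOOPBACK) },
 *		},
 *	};
 *
 * Anything between the end of the transport address and the end of the
 * structure is zeroed by the validator, so trailing bytes need not be
 * initialised by the caller.
 */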

/*
 * bind a local address to an RxRPC socket
 */
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	u16 service_id;
	int ret;

	_enter("%p,%p,%d", rx, saddr, len);

	ret = rxrpc_validate_address(rx, srx, len);
	if (ret < 0)
		goto error;
	service_id = srx->srx_service;

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		rx->srx = *srx;
		local = rxrpc_lookup_local(sock_net(&rx->sk), &rx->srx);
		if (IS_ERR(local)) {
			ret = PTR_ERR(local);
			goto error_unlock;
		}

		if (service_id) {
			write_lock(&local->services_lock);
			if (rcu_access_pointer(local->service))
				goto service_in_use;
			rx->local = local;
			rcu_assign_pointer(local->service, rx);
			write_unlock(&local->services_lock);

			rx->sk.sk_state = RXRPC_SERVER_BOUND;
		} else {
			rx->local = local;
			rx->sk.sk_state = RXRPC_CLIENT_BOUND;
		}
		break;

	case RXRPC_SERVER_BOUND:
		ret = -EINVAL;
		if (service_id == 0)
			goto error_unlock;
		ret = -EADDRINUSE;
		if (service_id == rx->srx.srx_service)
			goto error_unlock;
		ret = -EINVAL;
		srx->srx_service = rx->srx.srx_service;
		if (memcmp(srx, &rx->srx, sizeof(*srx)) != 0)
			goto error_unlock;
		rx->second_service = service_id;
		rx->sk.sk_state = RXRPC_SERVER_BOUND2;
		break;

	default:
		ret = -EINVAL;
		goto error_unlock;
	}

	release_sock(&rx->sk);
	_leave(" = 0");
	return 0;

service_in_use:
	write_unlock(&local->services_lock);
	rxrpc_unuse_local(local);
	rxrpc_put_local(local);
	ret = -EADDRINUSE;
error_unlock:
	release_sock(&rx->sk);
error:
	_leave(" = %d", ret);
	return ret;
}
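
/*
 * Illustrative sketch (not part of the original file): how a userspace server
 * might bind a service ID and start listening, per the interface described in
 * Documentation/networking/rxrpc.rst.  The service ID (52) and port (7001)
 * are arbitrary example values.
 *
 *	struct sockaddr_rxrpc srx = {
 *		.srx_family	= AF_RXRPC,
 *		.srx_service	= 52,
 *		.transport_type	= SOCK_DGRAM,
 *		.transport_len	= sizeof(struct sockaddr_in),
 *		.transport.sin.sin_family = AF_INET,
 *		.transport.sin.sin_port = htons(7001),
 *	};
 *	int fd = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
 *
 *	bind(fd, (struct sockaddr *)&srx, sizeof(srx));
 *	listen(fd, 100);
 *
 * A second bind() with a different service ID moves the socket to
 * RXRPC_SERVER_BOUND2, letting it serve two service IDs (see
 * RXRPC_UPGRADEABLE_SERVICE below); binding the same ID twice fails with
 * EADDRINUSE.
 */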

/*
 * set the number of pending calls permitted on a listening socket
 */
static int rxrpc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	unsigned int max, old;
	int ret;

	_enter("%p,%d", rx, backlog);

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		ret = -EADDRNOTAVAIL;
		break;
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_BOUND2:
		ASSERT(rx->local != NULL);
		max = READ_ONCE(rxrpc_max_backlog);
		ret = -EINVAL;
		if (backlog == INT_MAX)
			backlog = max;
		else if (backlog < 0 || backlog > max)
			break;
		old = sk->sk_max_ack_backlog;
		sk->sk_max_ack_backlog = backlog;
		ret = rxrpc_service_prealloc(rx, GFP_KERNEL);
		if (ret == 0)
			rx->sk.sk_state = RXRPC_SERVER_LISTENING;
		else
			sk->sk_max_ack_backlog = old;
		break;
	case RXRPC_SERVER_LISTENING:
		if (backlog == 0) {
			rx->sk.sk_state = RXRPC_SERVER_LISTEN_DISABLED;
			sk->sk_max_ack_backlog = 0;
			rxrpc_discard_prealloc(rx);
			ret = 0;
			break;
		}
		fallthrough;
	default:
		ret = -EBUSY;
		break;
	}

	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_begin_call - Allow a kernel service to begin a call
 * @sock: The socket on which to make the call
 * @srx: The address of the peer to contact
 * @key: The security context to use (defaults to socket setting)
 * @user_call_ID: The ID to use
 * @tx_total_len: Total length of data to transmit during the call (or -1)
 * @gfp: The allocation constraints
 * @notify_rx: Where to send notifications instead of socket queue
 * @upgrade: Request service upgrade for call
 * @interruptibility: The call is interruptible, or can be canceled.
 * @debug_id: The debug ID for tracing to be assigned to the call
 *
 * Allow a kernel service to begin a call on the nominated socket. This just
 * sets up all the internal tracking structures and allocates connection and
 * call IDs as appropriate. The call to be used is returned.
 *
 * The default socket destination address and security may be overridden by
 * supplying @srx and @key.
 */
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
					   struct sockaddr_rxrpc *srx,
					   struct key *key,
					   unsigned long user_call_ID,
					   s64 tx_total_len,
					   gfp_t gfp,
					   rxrpc_notify_rx_t notify_rx,
					   bool upgrade,
					   enum rxrpc_interruptibility interruptibility,
					   unsigned int debug_id)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_call_params p;
	struct rxrpc_call *call;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter(",,%x,%lx", key_serial(key), user_call_ID);

	ret = rxrpc_validate_address(rx, srx, sizeof(*srx));
	if (ret < 0)
		return ERR_PTR(ret);

	lock_sock(&rx->sk);

	if (!key)
		key = rx->key;
	if (key && !key->payload.data[0])
		key = NULL; /* a no-security key */

	memset(&p, 0, sizeof(p));
	p.user_call_ID = user_call_ID;
	p.tx_total_len = tx_total_len;
	p.interruptibility = interruptibility;
	p.kernel = true;

	memset(&cp, 0, sizeof(cp));
	cp.local = rx->local;
	cp.key = key;
	cp.security_level = rx->min_sec_level;
	cp.exclusive = false;
	cp.upgrade = upgrade;
	cp.service_id = srx->srx_service;
	call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp, debug_id);
	/* The socket has been unlocked. */
	if (!IS_ERR(call)) {
		call->notify_rx = notify_rx;
		mutex_unlock(&call->user_mutex);
	}

	rxrpc_put_peer(cp.peer);
	_leave(" = %p", call);
	return call;
}
EXPORT_SYMBOL(rxrpc_kernel_begin_call);
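
/*
 * Illustrative sketch (not part of the original file): how an in-kernel user
 * such as kAFS might start a client call on an AF_RXRPC socket created with
 * sock_create_kern().  The callback, operation record and service values are
 * assumptions made for the example; the interruptibility constant comes from
 * enum rxrpc_interruptibility in <net/af_rxrpc.h>.
 *
 *	struct rxrpc_call *call;
 *
 *	call = rxrpc_kernel_begin_call(rxsock, &srx, NULL,
 *				       (unsigned long)my_op,	// user_call_ID
 *				       -1,			// tx len unknown
 *				       GFP_KERNEL,
 *				       my_notify_rx,		// wakes my_op
 *				       false,			// no upgrade
 *				       RXRPC_INTERRUPTIBLE,
 *				       atomic_inc_return(&rxrpc_debug_id));
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 *
 * The value passed as user_call_ID is handed back in notifications so the
 * caller can map them onto its own operation record; the matching teardown
 * is rxrpc_kernel_end_call() below.
 */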

/*
 * Dummy function used to stop the notifier talking to recvmsg().
 */
static void rxrpc_dummy_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
				  unsigned long call_user_ID)
{
}

/**
 * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
 * @sock: The socket the call is on
 * @call: The call to end
 *
 * Allow a kernel service to end a call it was using. The call must be
 * complete before this is called (the call should be aborted if necessary).
 */
void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
{
	_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));

	mutex_lock(&call->user_mutex);
	rxrpc_release_call(rxrpc_sk(sock->sk), call);

	/* Make sure we're not going to call back into a kernel service */
	if (call->notify_rx) {
		spin_lock_bh(&call->notify_lock);
		call->notify_rx = rxrpc_dummy_notify_rx;
		spin_unlock_bh(&call->notify_lock);
	}

	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put_kernel);
}
EXPORT_SYMBOL(rxrpc_kernel_end_call);

/**
 * rxrpc_kernel_check_life - Check to see whether a call is still alive
 * @sock: The socket the call is on
 * @call: The call to check
 *
 * Allow a kernel service to find out whether a call is still alive -
 * ie. whether it has completed.
 */
bool rxrpc_kernel_check_life(const struct socket *sock,
			     const struct rxrpc_call *call)
{
	return call->state != RXRPC_CALL_COMPLETE;
}
EXPORT_SYMBOL(rxrpc_kernel_check_life);
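
/*
 * Illustrative sketch (not part of the original file): a kernel service can
 * poll rxrpc_kernel_check_life() while waiting for its own completion signal,
 * ending the call once it is done or has given up.  The wait queue and the
 * completion flag are assumptions for the example.
 *
 *	while (!test_bit(MY_OP_COMPLETE, &op->flags)) {
 *		if (!rxrpc_kernel_check_life(rxsock, call))
 *			break;		// call already completed
 *		wait_event_timeout(op->waitq,
 *				   test_bit(MY_OP_COMPLETE, &op->flags), HZ);
 *	}
 *	rxrpc_kernel_end_call(rxsock, call);
 */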

/**
 * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call.
 * @sock: The socket the call is on
 * @call: The call to query
 *
 * Allow a kernel service to retrieve the epoch value from a service call to
 * see if the client at the other end rebooted.
 */
u32 rxrpc_kernel_get_epoch(struct socket *sock, struct rxrpc_call *call)
{
	return call->conn->proto.epoch;
}
EXPORT_SYMBOL(rxrpc_kernel_get_epoch);

/**
 * rxrpc_kernel_new_call_notification - Get notifications of new calls
 * @sock: The socket to intercept received messages on
 * @notify_new_call: Function to be called when new calls appear
 * @discard_new_call: Function to discard preallocated calls
 *
 * Allow a kernel service to be given notifications about new calls.
 */
void rxrpc_kernel_new_call_notification(
	struct socket *sock,
	rxrpc_notify_new_call_t notify_new_call,
	rxrpc_discard_new_call_t discard_new_call)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);

	rx->notify_new_call = notify_new_call;
	rx->discard_new_call = discard_new_call;
}
EXPORT_SYMBOL(rxrpc_kernel_new_call_notification);
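
/*
 * Illustrative sketch (not part of the original file): a kernel server would
 * typically register its callbacks right after creating and binding its
 * AF_RXRPC socket, before it starts listening (kAFS, for example, does this
 * from its socket-opening routine).  The callback names are placeholders;
 * their prototypes follow rxrpc_notify_new_call_t and
 * rxrpc_discard_new_call_t from <net/af_rxrpc.h>.
 *
 *	rxrpc_kernel_new_call_notification(rxsock,
 *					   my_rx_new_call,	// incoming call
 *					   my_rx_discard_call);	// prealloc drop
 */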

/**
 * rxrpc_kernel_set_max_life - Set maximum lifespan on a call
 * @sock: The socket the call is on
 * @call: The call to configure
 * @hard_timeout: The maximum lifespan of the call in jiffies
 *
 * Set the maximum lifespan of a call. The call will end with ETIME or
 * ETIMEDOUT if it takes longer than this.
 */
void rxrpc_kernel_set_max_life(struct socket *sock, struct rxrpc_call *call,
			       unsigned long hard_timeout)
{
	unsigned long now;

	mutex_lock(&call->user_mutex);

	now = jiffies;
	hard_timeout += now;
	WRITE_ONCE(call->expect_term_by, hard_timeout);
	rxrpc_reduce_call_timer(call, hard_timeout, now, rxrpc_timer_set_for_hard);

	mutex_unlock(&call->user_mutex);
}
EXPORT_SYMBOL(rxrpc_kernel_set_max_life);
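
/*
 * Illustrative sketch (not part of the original file): since @hard_timeout is
 * in jiffies, callers usually derive it from a human-readable interval.  A
 * 30-second cap on a freshly started call might look like either of these
 * (the interval is an arbitrary example value):
 *
 *	rxrpc_kernel_set_max_life(rxsock, call, 30 * HZ);
 *
 *	rxrpc_kernel_set_max_life(rxsock, call, msecs_to_jiffies(30000));
 */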

/*
 * connect an RxRPC socket
 * - this just targets it at a specific destination; no actual connection
 *   negotiation takes place
 */
static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
			 int addr_len, int flags)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)addr;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

	ret = rxrpc_validate_address(rx, srx, addr_len);
	if (ret < 0) {
		_leave(" = %d [bad addr]", ret);
		return ret;
	}

	lock_sock(&rx->sk);

	ret = -EISCONN;
	if (test_bit(RXRPC_SOCK_CONNECTED, &rx->flags))
		goto error;

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
		fallthrough;
	case RXRPC_CLIENT_UNBOUND:
	case RXRPC_CLIENT_BOUND:
		break;
	default:
		ret = -EBUSY;
		goto error;
	}

	rx->connect_srx = *srx;
	set_bit(RXRPC_SOCK_CONNECTED, &rx->flags);
	ret = 0;

error:
	release_sock(&rx->sk);
	return ret;
}
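
/*
 * Illustrative sketch (not part of the original file): from userspace,
 * connect() simply records a default destination so that subsequent
 * sendmsg() calls can omit msg_name.  The address values are arbitrary
 * examples.
 *
 *	struct sockaddr_rxrpc srx = {
 *		.srx_family	= AF_RXRPC,
 *		.srx_service	= 52,
 *		.transport_type	= SOCK_DGRAM,
 *		.transport_len	= sizeof(struct sockaddr_in),
 *		.transport.sin.sin_family = AF_INET,
 *		.transport.sin.sin_port = htons(7001),
 *		.transport.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
 *	};
 *
 *	connect(fd, (struct sockaddr *)&srx, sizeof(srx));
 *
 * No packets are exchanged at this point; the real connection is set up
 * lazily when the first call is made.
 */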

/*
 * send a message through an RxRPC socket
 * - in a client this does a number of things:
 *   - finds/sets up a connection for the security specified (if any)
 *   - initiates a call (ID in control data)
 *   - ends the request phase of a call (if MSG_MORE is not set)
 *   - sends a call data packet
 *   - may send an abort (abort code in control data)
 */
static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter(",{%d},,%zu", rx->sk.sk_state, len);

	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (m->msg_name) {
		ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
		if (ret < 0) {
			_leave(" = %d [bad addr]", ret);
			return ret;
		}
	}

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
	case RXRPC_CLIENT_UNBOUND:
		rx->srx.srx_family = AF_RXRPC;
		rx->srx.srx_service = 0;
		rx->srx.transport_type = SOCK_DGRAM;
		rx->srx.transport.family = rx->family;
		switch (rx->family) {
		case AF_INET:
			rx->srx.transport_len = sizeof(struct sockaddr_in);
			break;
#ifdef CONFIG_AF_RXRPC_IPV6
		case AF_INET6:
			rx->srx.transport_len = sizeof(struct sockaddr_in6);
			break;
#endif
		default:
			ret = -EAFNOSUPPORT;
			goto error_unlock;
		}
		local = rxrpc_lookup_local(sock_net(sock->sk), &rx->srx);
		if (IS_ERR(local)) {
			ret = PTR_ERR(local);
			goto error_unlock;
		}

		rx->local = local;
		rx->sk.sk_state = RXRPC_CLIENT_BOUND;
		fallthrough;

	case RXRPC_CLIENT_BOUND:
		if (!m->msg_name &&
		    test_bit(RXRPC_SOCK_CONNECTED, &rx->flags)) {
			m->msg_name = &rx->connect_srx;
			m->msg_namelen = sizeof(rx->connect_srx);
		}
		fallthrough;
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_LISTENING:
		ret = rxrpc_do_sendmsg(rx, m, len);
		/* The socket has been unlocked */
		goto out;
	default:
		ret = -EINVAL;
		goto error_unlock;
	}

error_unlock:
	release_sock(&rx->sk);
out:
	_leave(" = %d", ret);
	return ret;
}
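
/*
 * Illustrative sketch (not part of the original file): a minimal userspace
 * request, assuming the socket has already been connect()ed so msg_name can
 * be omitted.  The control-message layout follows the description in
 * Documentation/networking/rxrpc.rst; the call ID value and request buffer
 * are arbitrary examples.
 *
 *	unsigned long call_id = 1;
 *	char ctrl[CMSG_SPACE(sizeof(call_id))];
 *	struct iovec iov = { .iov_base = request, .iov_len = request_len };
 *	struct msghdr msg = {
 *		.msg_iov	= &iov,
 *		.msg_iovlen	= 1,
 *		.msg_control	= ctrl,
 *		.msg_controllen	= sizeof(ctrl),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type	 = RXRPC_USER_CALL_ID;
 *	cmsg->cmsg_len	 = CMSG_LEN(sizeof(call_id));
 *	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));
 *
 *	sendmsg(fd, &msg, 0);		// no MSG_MORE: request phase ends
 *
 * The same user call ID then appears in recvmsg() control data so the reply
 * can be matched back to this call.
 */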

int rxrpc_sock_set_min_security_level(struct sock *sk, unsigned int val)
{
	if (sk->sk_state != RXRPC_UNBOUND)
		return -EISCONN;
	if (val > RXRPC_SECURITY_MAX)
		return -EINVAL;
	lock_sock(sk);
	rxrpc_sk(sk)->min_sec_level = val;
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL(rxrpc_sock_set_min_security_level);
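
/*
 * Illustrative sketch (not part of the original file): an in-kernel user sets
 * the minimum security level on the socket's struct sock while it is still
 * unbound, e.g. to insist on encrypted traffic (kAFS, for instance, requests
 * RXRPC_SECURITY_ENCRYPT when it opens its socket):
 *
 *	ret = rxrpc_sock_set_min_security_level(rxsock->sk,
 *						RXRPC_SECURITY_ENCRYPT);
 *	if (ret < 0)
 *		goto error;
 */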

/*
 * set RxRPC socket options
 */
static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
			    sockptr_t optval, unsigned int optlen)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	unsigned int min_sec_level;
	u16 service_upgrade[2];
	int ret;

	_enter(",%d,%d,,%d", level, optname, optlen);

	lock_sock(&rx->sk);
	ret = -EOPNOTSUPP;

	if (level == SOL_RXRPC) {
		switch (optname) {
		case RXRPC_EXCLUSIVE_CONNECTION:
			ret = -EINVAL;
			if (optlen != 0)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			rx->exclusive = true;
			goto success;

		case RXRPC_SECURITY_KEY:
			ret = -EINVAL;
			if (rx->key)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = rxrpc_request_key(rx, optval, optlen);
			goto error;

		case RXRPC_SECURITY_KEYRING:
			ret = -EINVAL;
			if (rx->key)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = rxrpc_server_keyring(rx, optval, optlen);
			goto error;

		case RXRPC_MIN_SECURITY_LEVEL:
			ret = -EINVAL;
			if (optlen != sizeof(unsigned int))
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = copy_from_sockptr(&min_sec_level, optval,
						sizeof(unsigned int));
			if (ret < 0)
				goto error;
			ret = -EINVAL;
			if (min_sec_level > RXRPC_SECURITY_MAX)
				goto error;
			rx->min_sec_level = min_sec_level;
			goto success;

		case RXRPC_UPGRADEABLE_SERVICE:
			ret = -EINVAL;
			if (optlen != sizeof(service_upgrade) ||
			    rx->service_upgrade.from != 0)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_SERVER_BOUND2)
				goto error;
			ret = -EFAULT;
			if (copy_from_sockptr(service_upgrade, optval,
					      sizeof(service_upgrade)) != 0)
				goto error;
			ret = -EINVAL;
			if ((service_upgrade[0] != rx->srx.srx_service ||
			     service_upgrade[1] != rx->second_service) &&
			    (service_upgrade[0] != rx->second_service ||
			     service_upgrade[1] != rx->srx.srx_service))
				goto error;
			rx->service_upgrade.from = service_upgrade[0];
			rx->service_upgrade.to = service_upgrade[1];
			goto success;

		default:
			break;
		}
	}

success:
	ret = 0;
error:
	release_sock(&rx->sk);
	return ret;
}
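
/*
 * Illustrative sketch (not part of the original file): the userspace side of
 * the options handled above, applied while the socket is in the appropriate
 * state.  The security level and the two service IDs are arbitrary examples;
 * the option names come from include/uapi/linux/rxrpc.h.
 *
 *	unsigned int level = RXRPC_SECURITY_ENCRYPT;
 *	unsigned short upgrade[2] = { 52, 2 };	// upgrade service 52 -> 2
 *
 *	// Only valid while the socket is still unbound:
 *	setsockopt(fd, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
 *		   &level, sizeof(level));
 *
 *	// Only valid once two service IDs are bound (RXRPC_SERVER_BOUND2):
 *	setsockopt(fd, SOL_RXRPC, RXRPC_UPGRADEABLE_SERVICE,
 *		   upgrade, sizeof(upgrade));
 */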
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) * Get socket options.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) static int rxrpc_getsockopt(struct socket *sock, int level, int optname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) char __user *optval, int __user *_optlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) int optlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) if (level != SOL_RXRPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) if (get_user(optlen, _optlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) switch (optname) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) case RXRPC_SUPPORTED_CMSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) if (optlen < sizeof(int))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) return -ETOOSMALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) if (put_user(RXRPC__SUPPORTED - 1, (int __user *)optval) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) put_user(sizeof(int), _optlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) * permit an RxRPC socket to be polled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) poll_table *wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) struct rxrpc_sock *rx = rxrpc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) __poll_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) sock_poll_wait(file, sock, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) /* the socket is readable if there are any messages waiting on the Rx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) * queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) if (!list_empty(&rx->recvmsg_q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) mask |= EPOLLIN | EPOLLRDNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) /* the socket is writable if there is space to add new data to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * socket; there is no guarantee that any particular call in progress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) * on the socket may have space in the Tx ACK window */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) if (rxrpc_writable(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) mask |= EPOLLOUT | EPOLLWRNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) return mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) * create an RxRPC socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) int kern)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) struct rxrpc_net *rxnet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) struct rxrpc_sock *rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) _enter("%p,%d", sock, protocol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) /* we support transport protocol UDP/UDP6 only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) if (protocol != PF_INET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) IS_ENABLED(CONFIG_AF_RXRPC_IPV6) && protocol != PF_INET6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) return -EPROTONOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) if (sock->type != SOCK_DGRAM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) return -ESOCKTNOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) sock->ops = &rxrpc_rpc_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) sock->state = SS_UNCONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) if (!sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) sock_init_data(sock, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) sock_set_flag(sk, SOCK_RCU_FREE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) sk->sk_state = RXRPC_UNBOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) sk->sk_write_space = rxrpc_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) sk->sk_max_ack_backlog = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) sk->sk_destruct = rxrpc_sock_destructor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) rx = rxrpc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) rx->family = protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) rx->calls = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) spin_lock_init(&rx->incoming_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) INIT_LIST_HEAD(&rx->sock_calls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) INIT_LIST_HEAD(&rx->to_be_accepted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) INIT_LIST_HEAD(&rx->recvmsg_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) rwlock_init(&rx->recvmsg_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) rwlock_init(&rx->call_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) memset(&rx->srx, 0, sizeof(rx->srx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) rxnet = rxrpc_net(sock_net(&rx->sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) timer_reduce(&rxnet->peer_keepalive_timer, jiffies + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) _leave(" = 0 [%p]", rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * Kill all the calls on a socket and shut it down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) static int rxrpc_shutdown(struct socket *sock, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct rxrpc_sock *rx = rxrpc_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) _enter("%p,%d", sk, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if (flags != SHUT_RDWR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (sk->sk_state == RXRPC_CLOSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) return -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) spin_lock_bh(&sk->sk_receive_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (sk->sk_state < RXRPC_CLOSE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) sk->sk_state = RXRPC_CLOSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) sk->sk_shutdown = SHUTDOWN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) ret = -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) spin_unlock_bh(&sk->sk_receive_queue.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) rxrpc_discard_prealloc(rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }

/*
 * RxRPC socket destructor
 */
static void rxrpc_sock_destructor(struct sock *sk)
{
	_enter("%p", sk);

	rxrpc_purge_queue(&sk->sk_receive_queue);

	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Attempt to release alive rxrpc socket: %p\n", sk);
		return;
	}
}

/*
 * release an RxRPC socket
 */
static int rxrpc_release_sock(struct sock *sk)
{
	struct rxrpc_sock *rx = rxrpc_sk(sk);

	_enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));

	/* declare the socket closed for business */
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	/* We want to kill off all connections from a service socket
	 * as fast as possible because we can't share these; client
	 * sockets, on the other hand, can share an endpoint.
	 */
	switch (sk->sk_state) {
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_BOUND2:
	case RXRPC_SERVER_LISTENING:
	case RXRPC_SERVER_LISTEN_DISABLED:
		rx->local->service_closed = true;
		break;
	}

	spin_lock_bh(&sk->sk_receive_queue.lock);
	sk->sk_state = RXRPC_CLOSE;
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	if (rx->local && rcu_access_pointer(rx->local->service) == rx) {
		write_lock(&rx->local->services_lock);
		rcu_assign_pointer(rx->local->service, NULL);
		write_unlock(&rx->local->services_lock);
	}

	/* try to flush out this socket */
	rxrpc_discard_prealloc(rx);
	rxrpc_release_calls_on_socket(rx);
	flush_workqueue(rxrpc_workqueue);
	rxrpc_purge_queue(&sk->sk_receive_queue);

	rxrpc_unuse_local(rx->local);
	rxrpc_put_local(rx->local);
	rx->local = NULL;
	key_put(rx->key);
	rx->key = NULL;
	key_put(rx->securities);
	rx->securities = NULL;
	sock_put(sk);

	_leave(" = 0");
	return 0;
}

/*
 * release an RxRPC BSD socket on close() or equivalent
 */
static int rxrpc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	_enter("%p{%p}", sock, sk);

	if (!sk)
		return 0;

	sock->sk = NULL;

	return rxrpc_release_sock(sk);
}

/*
 * RxRPC network protocol
 */
static const struct proto_ops rxrpc_rpc_ops = {
	.family		= PF_RXRPC,
	.owner		= THIS_MODULE,
	.release	= rxrpc_release,
	.bind		= rxrpc_bind,
	.connect	= rxrpc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= rxrpc_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= rxrpc_listen,
	.shutdown	= rxrpc_shutdown,
	.setsockopt	= rxrpc_setsockopt,
	.getsockopt	= rxrpc_getsockopt,
	.sendmsg	= rxrpc_sendmsg,
	.recvmsg	= rxrpc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
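
/*
 * The table above is what a userspace service socket exercises through
 * bind() and listen().  A hedged sketch (userspace code; field usage follows
 * Documentation/networking/rxrpc.rst, and the service ID and port below are
 * arbitrary example values, not anything defined by this file):
 *
 *	struct sockaddr_rxrpc srx = {
 *		.srx_family			= AF_RXRPC,
 *		.srx_service			= 1234,
 *		.transport_type			= SOCK_DGRAM,
 *		.transport_len			= sizeof(srx.transport.sin),
 *		.transport.sin.sin_family	= AF_INET,
 *		.transport.sin.sin_port		= htons(7001),
 *	};
 *
 *	bind(fd, (struct sockaddr *)&srx, sizeof(srx));
 *	listen(fd, 10);
 *
 * bind() lands in rxrpc_bind() and listen() in rxrpc_listen() via the
 * proto_ops table above.
 */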

static struct proto rxrpc_proto = {
	.name		= "RXRPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct rxrpc_sock),
	.max_header	= sizeof(struct rxrpc_wire_header),
};

static const struct net_proto_family rxrpc_family_ops = {
	.family	= PF_RXRPC,
	.create	= rxrpc_create,
	.owner	= THIS_MODULE,
};

/*
 * initialise and register the RxRPC protocol
 */
static int __init af_rxrpc_init(void)
{
	int ret = -1;
	unsigned int tmp;

	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof_field(struct sk_buff, cb));

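	/* Seed the client connection ID allocator at a random, non-zero point
	 * so that allocated IDs do not begin from the same value on every
	 * boot.  The seed is kept to 30 bits; the low-order bits of the
	 * on-wire connection ID are reserved for the channel number.
	 */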
	get_random_bytes(&tmp, sizeof(tmp));
	tmp &= 0x3fffffff;
	if (tmp == 0)
		tmp = 1;
	idr_set_cursor(&rxrpc_client_conn_ids, tmp);

	ret = -ENOMEM;
	rxrpc_call_jar = kmem_cache_create(
		"rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!rxrpc_call_jar) {
		pr_notice("Failed to allocate call jar\n");
		goto error_call_jar;
	}

	rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1);
	if (!rxrpc_workqueue) {
		pr_notice("Failed to allocate work queue\n");
		goto error_work_queue;
	}

	ret = rxrpc_init_security();
	if (ret < 0) {
		pr_crit("Cannot initialise security\n");
		goto error_security;
	}

	ret = register_pernet_device(&rxrpc_net_ops);
	if (ret)
		goto error_pernet;

	ret = proto_register(&rxrpc_proto, 1);
	if (ret < 0) {
		pr_crit("Cannot register protocol\n");
		goto error_proto;
	}

	ret = sock_register(&rxrpc_family_ops);
	if (ret < 0) {
		pr_crit("Cannot register socket family\n");
		goto error_sock;
	}

	ret = register_key_type(&key_type_rxrpc);
	if (ret < 0) {
		pr_crit("Cannot register client key type\n");
		goto error_key_type;
	}

	ret = register_key_type(&key_type_rxrpc_s);
	if (ret < 0) {
		pr_crit("Cannot register server key type\n");
		goto error_key_type_s;
	}

	ret = rxrpc_sysctl_init();
	if (ret < 0) {
		pr_crit("Cannot register sysctls\n");
		goto error_sysctls;
	}

	return 0;

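	/* Unwind in the reverse order of the registrations above: each label
	 * undoes only what had succeeded before the failing step.
	 */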
error_sysctls:
	unregister_key_type(&key_type_rxrpc_s);
error_key_type_s:
	unregister_key_type(&key_type_rxrpc);
error_key_type:
	sock_unregister(PF_RXRPC);
error_sock:
	proto_unregister(&rxrpc_proto);
error_proto:
	unregister_pernet_device(&rxrpc_net_ops);
error_pernet:
	rxrpc_exit_security();
error_security:
	destroy_workqueue(rxrpc_workqueue);
error_work_queue:
	kmem_cache_destroy(rxrpc_call_jar);
error_call_jar:
	return ret;
}

/*
 * unregister the RxRPC protocol
 */
static void __exit af_rxrpc_exit(void)
{
	_enter("");
	rxrpc_sysctl_exit();
	unregister_key_type(&key_type_rxrpc_s);
	unregister_key_type(&key_type_rxrpc);
	sock_unregister(PF_RXRPC);
	proto_unregister(&rxrpc_proto);
	unregister_pernet_device(&rxrpc_net_ops);
	ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
	ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);

	/* Make sure the local and peer records pinned by any dying connections
	 * are released.
	 */
	rcu_barrier();
	rxrpc_destroy_client_conn_ids();

	destroy_workqueue(rxrpc_workqueue);
	rxrpc_exit_security();
	kmem_cache_destroy(rxrpc_call_jar);
	_leave("");
}

module_init(af_rxrpc_init);
module_exit(af_rxrpc_exit);