Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * INET		An implementation of the TCP/IP protocol suite for the LINUX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *		operating system.  INET is implemented using the BSD Socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *		interface as the means of communication with the user level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *		Generic INET transport hashtables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * Authors:	Lotsa people, from code originally in tcp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/random.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/wait.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/memblock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <net/addrconf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <net/inet_connection_sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <net/inet_hashtables.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #if IS_ENABLED(CONFIG_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <net/inet6_hashtables.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <net/secure_seq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include <net/ip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #include <net/tcp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #include <net/sock_reuseport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 			const __u16 lport, const __be32 faddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 			const __be16 fport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	static u32 inet_ehash_secret __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 	return __inet_ehashfn(laddr, lport, faddr, fport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 			      inet_ehash_secret + net_hash_mix(net));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 
/* This function handles inet_sock, but also timewait and request sockets
 * for IPv4/IPv6.  Returns the established-table hash for @sk's 4-tuple.
 */
static u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	/* Native IPv6 peers hash through the IPv6 function; v4-mapped
	 * destinations fall through so they hash like plain IPv4.
	 */
	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		return inet6_ehashfn(sock_net(sk),
				     &sk->sk_v6_rcv_saddr, sk->sk_num,
				     &sk->sk_v6_daddr, sk->sk_dport);
#endif
	return inet_ehashfn(sock_net(sk),
			    sk->sk_rcv_saddr, sk->sk_num,
			    sk->sk_daddr, sk->sk_dport);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61)  * Allocate and initialize a new local port bind bucket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62)  * The bindhash mutex for snum's hash chain must be held here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 						 struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 						 struct inet_bind_hashbucket *head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 						 const unsigned short snum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 						 int l3mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	if (tb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 		write_pnet(&tb->ib_net, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 		tb->l3mdev    = l3mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 		tb->port      = snum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 		tb->fastreuse = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 		tb->fastreuseport = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 		INIT_HLIST_HEAD(&tb->owners);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 		hlist_add_head(&tb->node, &head->chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	return tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85)  * Caller must hold hashbucket lock for this tb with local BH disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	if (hlist_empty(&tb->owners)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 		__hlist_del(&tb->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 		kmem_cache_free(cachep, tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 
/* Bind @sk to local port @snum via the already-looked-up bucket @tb:
 * record the port number on the socket, add the socket to the bucket's
 * owner list, and stash the bucket so a later put can drop it.
 * NOTE(review): callers in this file invoke this under the bhash chain
 * spinlock — confirm all external callers do the same.
 */
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
{
	inet_sk(sk)->inet_num = snum;
	sk_add_bind_node(sk, &tb->owners);
	inet_csk(sk)->icsk_bind_hash = tb;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 
/*
 * Get rid of any references to a local port held by the given sock.
 * Undoes inet_bind_hash(): unlinks the socket from its bind bucket,
 * clears the cached port/bucket, and frees the bucket if it became
 * ownerless.  The bhash chain lock is taken here; caller must have
 * BHs disabled (see inet_put_port()).
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
			hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	/* Frees @tb if the socket above was its last owner. */
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 
/* Public entry point: release @sk's local port with bottom halves
 * disabled around the bind-hash manipulation.
 */
void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 
/* Make @child inherit a bind bucket from listener @sk.  Normally the
 * child's local port equals the listener's and the listener's bucket is
 * reused directly; with tproxy-style redirection the ports can differ,
 * in which case a matching bucket is looked up or created for the
 * child's port.  Returns 0 on success, -ENOENT if the listener has no
 * bucket, or -ENOMEM if a new bucket cannot be allocated.
 */
int __inet_inherit_port(const struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
	unsigned short port = inet_sk(child)->inet_num;
	/* Bucket index is computed from the CHILD's port, so the lock
	 * below protects the chain the child will end up on.
	 */
	const int bhash = inet_bhashfn(sock_net(sk), port,
			table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;
	int l3mdev;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	if (unlikely(!tb)) {
		spin_unlock(&head->lock);
		return -ENOENT;
	}
	if (tb->port != port) {
		l3mdev = inet_sk_bound_l3mdev(sk);

		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here. */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (net_eq(ib_net(tb), sock_net(sk)) &&
			    tb->l3mdev == l3mdev && tb->port == port)
				break;
		}
		if (!tb) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     sock_net(sk), head, port,
						     l3mdev);
			if (!tb) {
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
		}
		/* Propagate the child's reuse flags into the (possibly
		 * fresh) bucket's fastreuse state.
		 */
		inet_csk_update_fastreuse(tb, child);
	}
	inet_bind_hash(child, tb, port);
	spin_unlock(&head->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 
/* Map @sk to its secondary listener-hash (lhash2) bucket, hashing the
 * socket's bound local address and port with the family-appropriate
 * portaddr hash.
 */
static struct inet_listen_hashbucket *
inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
{
	u32 hash;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);
	else
#endif
		hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	return inet_lhash2_bucket(h, hash);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 
/* Insert listener @sk into the secondary (port+address) listener hash,
 * if that table has been allocated.
 */
static void inet_hash2(struct inet_hashinfo *h, struct sock *sk)
{
	struct inet_listen_hashbucket *ilb2;

	if (!h->lhash2)
		return;

	ilb2 = inet_lhash2_bucket_sk(h, sk);

	spin_lock(&ilb2->lock);
	/* IPv6 reuseport listeners are appended at the tail, everything
	 * else at the head — presumably to influence lookup ordering
	 * within the chain; NOTE(review): confirm the rationale against
	 * upstream commit history.
	 */
	if (sk->sk_reuseport && sk->sk_family == AF_INET6)
		hlist_add_tail_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
				   &ilb2->head);
	else
		hlist_add_head_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
				   &ilb2->head);
	ilb2->count++;
	spin_unlock(&ilb2->lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 
/* Remove @sk from the secondary listener hash.  Warns once and bails
 * if the socket is not actually hashed there (or the table does not
 * exist), so unbalanced unhash calls don't corrupt the chain.
 */
static void inet_unhash2(struct inet_hashinfo *h, struct sock *sk)
{
	struct inet_listen_hashbucket *ilb2;

	if (!h->lhash2 ||
	    WARN_ON_ONCE(hlist_unhashed(&inet_csk(sk)->icsk_listen_portaddr_node)))
		return;

	ilb2 = inet_lhash2_bucket_sk(h, sk);

	spin_lock(&ilb2->lock);
	hlist_del_init_rcu(&inet_csk(sk)->icsk_listen_portaddr_node);
	ilb2->count--;
	spin_unlock(&ilb2->lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) static inline int compute_score(struct sock *sk, struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 				const unsigned short hnum, const __be32 daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 				const int dif, const int sdif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	int score = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 	if (net_eq(sock_net(sk), net) && sk->sk_num == hnum &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 			!ipv6_only_sock(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 		if (sk->sk_rcv_saddr != daddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 		if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 		score =  sk->sk_bound_dev_if ? 2 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 		if (sk->sk_family == PF_INET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 			score++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 		if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 			score++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 	return score;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 					    struct sk_buff *skb, int doff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 					    __be32 saddr, __be16 sport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 					    __be32 daddr, unsigned short hnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 	struct sock *reuse_sk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 	u32 phash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 	if (sk->sk_reuseport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 		phash = inet_ehashfn(net, daddr, hnum, saddr, sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 		reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 	return reuse_sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)  * Here are some nice properties to exploit here. The BSD API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)  * does not allow a listening sock to specify the remote port nor the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)  * remote address for the connection. So always assume those are both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)  * wildcarded during the search since they can never be otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 
/* called with rcu_read_lock() : No refcount taken on the socket.
 * Scan one lhash2 bucket and return the best-scoring listener for the
 * incoming packet, or NULL if nothing in this bucket matches.
 */
static struct sock *inet_lhash2_lookup(struct net *net,
				struct inet_listen_hashbucket *ilb2,
				struct sk_buff *skb, int doff,
				const __be32 saddr, __be16 sport,
				const __be32 daddr, const unsigned short hnum,
				const int dif, const int sdif)
{
	struct inet_connection_sock *icsk;
	struct sock *sk, *result = NULL;
	int score, hiscore = 0;

	inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
		sk = (struct sock *)icsk;
		/* compute_score() returns -1 for sockets that cannot
		 * match, so they never beat hiscore.
		 */
		score = compute_score(sk, net, hnum, daddr, dif, sdif);
		if (score > hiscore) {
			/* A reuseport group's selection is final: stop
			 * scanning as soon as the group picks a socket.
			 */
			result = lookup_reuseport(net, sk, skb, doff,
						  saddr, sport, daddr, hnum);
			if (result)
				return result;

			result = sk;
			hiscore = score;
		}
	}

	return result;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) static inline struct sock *inet_lookup_run_bpf(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 					       struct inet_hashinfo *hashinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 					       struct sk_buff *skb, int doff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 					       __be32 saddr, __be16 sport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 					       __be32 daddr, u16 hnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	struct sock *sk, *reuse_sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 	bool no_reuseport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 	if (hashinfo != &tcp_hashinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 		return NULL; /* only TCP is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 	no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 					    saddr, sport, daddr, hnum, &sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 	if (no_reuseport || IS_ERR_OR_NULL(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 		return sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 	reuse_sk = lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 	if (reuse_sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 		sk = reuse_sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 	return sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 
/* Find the listening socket for an incoming IPv4 segment.  Order of
 * preference: a BPF sk_lookup redirect, then the lhash2 bucket for the
 * exact destination address, then the INADDR_ANY bucket.  Returns NULL
 * when no listener matches (including when BPF aborts the lookup with
 * an error pointer).  See inet_lhash2_lookup(): called under RCU, no
 * refcount is taken on the result.
 */
struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif, const int sdif)
{
	struct inet_listen_hashbucket *ilb2;
	struct sock *result = NULL;
	unsigned int hash2;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
		result = inet_lookup_run_bpf(net, hashinfo, skb, doff,
					     saddr, sport, daddr, hnum);
		if (result)
			goto done;
	}

	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, daddr, hnum,
				    dif, sdif);
	if (result)
		goto done;

	/* Lookup lhash2 with INADDR_ANY */
	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, htonl(INADDR_ANY), hnum,
				    dif, sdif);
done:
	/* BPF may return an ERR_PTR to drop the packet; map it to NULL. */
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) /* All sockets share common refcount, but have different destructors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) void sock_gen_put(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 	if (!refcount_dec_and_test(&sk->sk_refcnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 	if (sk->sk_state == TCP_TIME_WAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 		inet_twsk_free(inet_twsk(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 		reqsk_free(inet_reqsk(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 		sk_free(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) EXPORT_SYMBOL_GPL(sock_gen_put);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 
/* skb destructor: drop the reference held on skb->sk via the
 * state-aware release in sock_gen_put().
 */
void sock_edemux(struct sk_buff *skb)
{
	sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 
/* Lockless (RCU) lookup of an established/timewait socket by its
 * 4-tuple.  On success a reference is taken on the returned socket;
 * returns NULL when no socket matches.  Uses an hlist_nulls chain: if
 * the terminating nulls value is not the expected slot, an item was
 * moved to another chain mid-walk and the scan restarts.
 */
struct sock *__inet_lookup_established(struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif, const int sdif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(INET_MATCH(sk, net, acookie,
				      saddr, daddr, ports, dif, sdif))) {
			/* Refcount may already be zero if the socket is
			 * being freed concurrently; treat as not found.
			 */
			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
				goto out;
			/* Re-check the match after taking the reference:
			 * the socket may have been recycled for another
			 * connection in the meantime.
			 */
			if (unlikely(!INET_MATCH(sk, net, acookie,
						 saddr, daddr, ports,
						 dif, sdif))) {
				sock_gen_put(sk);
				goto begin;
			}
			goto found;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) /* called with local bh disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) static int __inet_check_established(struct inet_timewait_death_row *death_row,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 				    struct sock *sk, __u16 lport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 				    struct inet_timewait_sock **twp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 	struct inet_hashinfo *hinfo = death_row->hashinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 	struct inet_sock *inet = inet_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 	__be32 daddr = inet->inet_rcv_saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 	__be32 saddr = inet->inet_daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 	int dif = sk->sk_bound_dev_if;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 	struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 	int sdif = l3mdev_master_ifindex_by_index(net, dif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 	INET_ADDR_COOKIE(acookie, saddr, daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 	unsigned int hash = inet_ehashfn(net, daddr, lport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 					 saddr, inet->inet_dport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	struct sock *sk2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 	const struct hlist_nulls_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 	struct inet_timewait_sock *tw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 	spin_lock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 	sk_nulls_for_each(sk2, node, &head->chain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 		if (sk2->sk_hash != hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 		if (likely(INET_MATCH(sk2, net, acookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 					 saddr, daddr, ports, dif, sdif))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 			if (sk2->sk_state == TCP_TIME_WAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 				tw = inet_twsk(sk2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 				if (twsk_unique(sk, sk2, twp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 			goto not_unique;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 	/* Must record num and sport now. Otherwise we will see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 	 * in hash table socket with a funny identity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	inet->inet_num = lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 	inet->inet_sport = htons(lport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 	sk->sk_hash = hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	WARN_ON(!sk_unhashed(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 	__sk_nulls_add_node_rcu(sk, &head->chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 	if (tw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 		sk_nulls_del_node_init_rcu((struct sock *)tw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 	spin_unlock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 	if (twp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 		*twp = tw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 	} else if (tw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 		/* Silly. Should hash-dance instead... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 		inet_twsk_deschedule_put(tw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) not_unique:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	spin_unlock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 	return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) static u32 inet_sk_port_offset(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 	const struct inet_sock *inet = inet_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 					  inet->inet_daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 					  inet->inet_dport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) /* Searches for an exsiting socket in the ehash bucket list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)  * Returns true if found, false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) static bool inet_ehash_lookup_by_sk(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 				    struct hlist_nulls_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 	const __portpair ports = INET_COMBINED_PORTS(sk->sk_dport, sk->sk_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 	const int sdif = sk->sk_bound_dev_if;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	const int dif = sk->sk_bound_dev_if;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 	const struct hlist_nulls_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	struct sock *esk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	INET_ADDR_COOKIE(acookie, sk->sk_daddr, sk->sk_rcv_saddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 	sk_nulls_for_each_rcu(esk, node, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 		if (esk->sk_hash != sk->sk_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 		if (sk->sk_family == AF_INET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 			if (unlikely(INET_MATCH(esk, net, acookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 						sk->sk_daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 						sk->sk_rcv_saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 						ports, dif, sdif))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 				return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) #if IS_ENABLED(CONFIG_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 		else if (sk->sk_family == AF_INET6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 			if (unlikely(INET6_MATCH(esk, net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 						 &sk->sk_v6_daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 						 &sk->sk_v6_rcv_saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 						 ports, dif, sdif))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 				return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) /* Insert a socket into ehash, and eventually remove another one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)  * (The another one can be a SYN_RECV or TIMEWAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)  * If an existing socket already exists, socket sk is not inserted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)  * and sets found_dup_sk parameter to true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 	struct hlist_nulls_head *list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 	struct inet_ehash_bucket *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 	spinlock_t *lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 	bool ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 	WARN_ON_ONCE(!sk_unhashed(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 	sk->sk_hash = sk_ehashfn(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 	list = &head->chain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 	spin_lock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 	if (osk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 		ret = sk_nulls_del_node_init_rcu(osk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 	} else if (found_dup_sk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 		*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 		if (*found_dup_sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 			ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 		__sk_nulls_add_node_rcu(sk, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 	spin_unlock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 	bool ok = inet_ehash_insert(sk, osk, found_dup_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 	if (ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 		percpu_counter_inc(sk->sk_prot->orphan_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 		inet_sk_set_state(sk, TCP_CLOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 		sock_set_flag(sk, SOCK_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 		inet_csk_destroy_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 	return ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) EXPORT_SYMBOL_GPL(inet_ehash_nolisten);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) static int inet_reuseport_add_sock(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 				   struct inet_listen_hashbucket *ilb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	const struct hlist_nulls_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 	struct sock *sk2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	kuid_t uid = sock_i_uid(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 		if (sk2 != sk &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 		    sk2->sk_family == sk->sk_family &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 		    inet_csk(sk2)->icsk_bind_hash == tb &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 		    inet_rcv_saddr_equal(sk, sk2, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 			return reuseport_add_sock(sk, sk2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 						  inet_rcv_saddr_any(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) int __inet_hash(struct sock *sk, struct sock *osk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 	struct inet_listen_hashbucket *ilb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	if (sk->sk_state != TCP_LISTEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 		inet_ehash_nolisten(sk, osk, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 	WARN_ON(!sk_unhashed(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 	spin_lock(&ilb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 	if (sk->sk_reuseport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 		err = inet_reuseport_add_sock(sk, ilb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 			goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 		sk->sk_family == AF_INET6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 		__sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 		__sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 	inet_hash2(hashinfo, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 	ilb->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 	sock_set_flag(sk, SOCK_RCU_FREE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 	spin_unlock(&ilb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) EXPORT_SYMBOL(__inet_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) int inet_hash(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 	if (sk->sk_state != TCP_CLOSE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 		local_bh_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 		err = __inet_hash(sk, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 		local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) EXPORT_SYMBOL_GPL(inet_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) void inet_unhash(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	struct inet_listen_hashbucket *ilb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 	spinlock_t *lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 	if (sk_unhashed(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 	if (sk->sk_state == TCP_LISTEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 		ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 		lock = &ilb->lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 	spin_lock_bh(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 	if (sk_unhashed(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 	if (rcu_access_pointer(sk->sk_reuseport_cb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 		reuseport_detach_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 	if (ilb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 		inet_unhash2(hashinfo, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 		ilb->count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 	__sk_nulls_del_node_init_rcu(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 	spin_unlock_bh(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) EXPORT_SYMBOL_GPL(inet_unhash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) int __inet_hash_connect(struct inet_timewait_death_row *death_row,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 		struct sock *sk, u32 port_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 		int (*check_established)(struct inet_timewait_death_row *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 			struct sock *, __u16, struct inet_timewait_sock **))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 	struct inet_hashinfo *hinfo = death_row->hashinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 	struct inet_timewait_sock *tw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 	struct inet_bind_hashbucket *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 	int port = inet_sk(sk)->inet_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 	struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 	struct inet_bind_bucket *tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 	u32 remaining, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 	int ret, i, low, high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 	static u32 hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 	int l3mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 	if (port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 		head = &hinfo->bhash[inet_bhashfn(net, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 						  hinfo->bhash_size)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 		tb = inet_csk(sk)->icsk_bind_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 		spin_lock_bh(&head->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 		if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 			inet_ehash_nolisten(sk, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 			spin_unlock_bh(&head->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 		spin_unlock(&head->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 		/* No definite answer... Walk to established hash table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 		ret = check_established(death_row, sk, port, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 		local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 	l3mdev = inet_sk_bound_l3mdev(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 	inet_get_local_port_range(net, &low, &high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 	high++; /* [32768, 60999] -> [32768, 61000[ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	remaining = high - low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 	if (likely(remaining > 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 		remaining &= ~1U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 	offset = (hint + port_offset) % remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 	/* In first pass we try ports of @low parity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 	 * inet_csk_get_port() does the opposite choice.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 	offset &= ~1U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) other_parity_scan:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 	port = low + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 	for (i = 0; i < remaining; i += 2, port += 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 		if (unlikely(port >= high))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 			port -= remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 		if (inet_is_local_reserved_port(net, port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 		head = &hinfo->bhash[inet_bhashfn(net, port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 						  hinfo->bhash_size)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 		spin_lock_bh(&head->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 		/* Does not bother with rcv_saddr checks, because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 		 * the established check is already unique enough.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 		inet_bind_bucket_for_each(tb, &head->chain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 			if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 			    tb->port == port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 				if (tb->fastreuse >= 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 				    tb->fastreuseport >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 					goto next_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 				WARN_ON(hlist_empty(&tb->owners));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 				if (!check_established(death_row, sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 						       port, &tw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 					goto ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 				goto next_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 					     net, head, port, l3mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 		if (!tb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 			spin_unlock_bh(&head->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 		tb->fastreuse = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 		tb->fastreuseport = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 		goto ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) next_port:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 		spin_unlock_bh(&head->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 	offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 	if ((offset & 1) && remaining > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 		goto other_parity_scan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 	return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) ok:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 	hint += i + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 	/* Head lock still held and bh's disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 	inet_bind_hash(sk, tb, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 	if (sk_unhashed(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 		inet_sk(sk)->inet_sport = htons(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 		inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 	if (tw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 		inet_twsk_bind_unhash(tw, hinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 	spin_unlock(&head->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 	if (tw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 		inet_twsk_deschedule_put(tw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 	local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)  * Bind a port for a connect operation and hash it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) int inet_hash_connect(struct inet_timewait_death_row *death_row,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 		      struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 	u32 port_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 	if (!inet_sk(sk)->inet_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 		port_offset = inet_sk_port_offset(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 	return __inet_hash_connect(death_row, sk, port_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 				   __inet_check_established);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) EXPORT_SYMBOL_GPL(inet_hash_connect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) void inet_hashinfo_init(struct inet_hashinfo *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 		spin_lock_init(&h->listening_hash[i].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 		INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].nulls_head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 				      i + LISTENING_NULLS_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 		h->listening_hash[i].count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 	h->lhash2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) EXPORT_SYMBOL_GPL(inet_hashinfo_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) static void init_hashinfo_lhash2(struct inet_hashinfo *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 	for (i = 0; i <= h->lhash2_mask; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 		spin_lock_init(&h->lhash2[i].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 		INIT_HLIST_HEAD(&h->lhash2[i].head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 		h->lhash2[i].count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) 				unsigned long numentries, int scale,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 				unsigned long low_limit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) 				unsigned long high_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) 	h->lhash2 = alloc_large_system_hash(name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) 					    sizeof(*h->lhash2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) 					    numentries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) 					    scale,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) 					    0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) 					    NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) 					    &h->lhash2_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) 					    low_limit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) 					    high_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) 	init_hashinfo_lhash2(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) 	h->lhash2 = kmalloc_array(INET_LHTABLE_SIZE, sizeof(*h->lhash2), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) 	if (!h->lhash2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 	h->lhash2_mask = INET_LHTABLE_SIZE - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) 	/* INET_LHTABLE_SIZE must be a power of 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) 	BUG_ON(INET_LHTABLE_SIZE & h->lhash2_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) 	init_hashinfo_lhash2(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) EXPORT_SYMBOL_GPL(inet_hashinfo2_init_mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) 	unsigned int locksz = sizeof(spinlock_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) 	unsigned int i, nblocks = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) 	if (locksz != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) 		/* allocate 2 cache lines or at least one spinlock per cpu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) 		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) 		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) 		/* no more locks than number of hash buckets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) 		nblocks = min(nblocks, hashinfo->ehash_mask + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) 		hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) 		if (!hashinfo->ehash_locks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) 		for (i = 0; i < nblocks; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) 			spin_lock_init(&hashinfo->ehash_locks[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) 	hashinfo->ehash_locks_mask = nblocks - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);