Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * INET		An implementation of the TCP/IP protocol suite for the LINUX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *		operating system.  INET is implemented using the BSD Socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *		interface as the means of communication with the user level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Authors:	Lotsa people, from code originally in tcp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #ifndef _INET_HASHTABLES_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #define _INET_HASHTABLES_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/ip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/ipv6.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/socket.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/wait.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <net/inet_connection_sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <net/inet_sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <net/sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include <net/route.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #include <net/tcp_states.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #include <net/netns/hash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #include <linux/refcount.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) #include <asm/byteorder.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) /* This is for all connections with a full identity, no wildcards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35)  * The 'e' prefix stands for Establish, but we really put all sockets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36)  * but LISTEN ones.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37)  */
struct inet_ehash_bucket {
	struct hlist_nulls_head chain;	/* nulls-terminated chain of non-LISTEN sockets */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) /* There are a few simple rules, which allow for local port reuse by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43)  * an application.  In essence:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45)  *	1) Sockets bound to different interfaces may share a local port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46)  *	   Failing that, goto test 2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47)  *	2) If all sockets have sk->sk_reuse set, and none of them are in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48)  *	   TCP_LISTEN state, the port may be shared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49)  *	   Failing that, goto test 3.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50)  *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51)  *	   address, and none of them are the same, the port may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52)  *	   shared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53)  *	   Failing this, the port cannot be shared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55)  * The interesting point, is test #2.  This is what an FTP server does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56)  * all day.  To optimize this case we use a specific flag bit defined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57)  * below.  As we add sockets to a bind bucket list, we perform a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58)  * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59)  * As long as all sockets added to a bind bucket pass this test,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60)  * the flag bit will be set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61)  * The resulting situation is that tcp_v[46]_verify_bind() can just check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62)  * for this flag bit, if it is set and the socket trying to bind has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63)  * sk->sk_reuse set, we don't even have to walk the owners list at all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64)  * we return that it is ok to bind this socket to the requested local port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66)  * Sounds like a lot of work, but it is worth it.  In a more naive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67)  * implementation (ie. current FreeBSD etc.) the entire list of ports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68)  * must be walked for each data port opened by an ftp server.  Needless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69)  * to say, this does not scale at all.  With a couple thousand FTP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70)  * users logged onto your box, isn't it nice to know that new data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71)  * ports are created in O(1) time?  I thought so. ;-)	-DaveM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72)  */
/* Values for inet_bind_bucket.fastreuseport (see struct below). */
#define FASTREUSEPORT_ANY	1	/* cached state valid for any SO_REUSEPORT socket */
#define FASTREUSEPORT_STRICT	2	/* cached state valid only if cached addr/uid also match — verify against bind code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 
/* One bucket per bound local port (hashed via inet_bhashfn); tracks the
 * sockets owning the port plus cached state for the bind fast path
 * described in the comment above.
 */
struct inet_bind_bucket {
	possible_net_t		ib_net;		/* network namespace the port is bound in */
	int			l3mdev;		/* L3 master device scope for this binding */
	unsigned short		port;		/* the bound local port */
	signed char		fastreuse;	/* SO_REUSEADDR fast-path state (the flag bit above) */
	signed char		fastreuseport;	/* FASTREUSEPORT_* state for SO_REUSEPORT */
	kuid_t			fastuid;	/* cached owner uid for reuseport checks */
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		fast_v6_rcv_saddr;	/* cached v6 bound address — presumably from first owner; verify */
#endif
	__be32			fast_rcv_saddr;	/* cached v4 bound address */
	unsigned short		fast_sk_family;	/* family of the socket that populated the fast_* fields */
	bool			fast_ipv6_only;	/* cached IPV6_V6ONLY setting */
	struct hlist_node	node;		/* link in inet_bind_hashbucket.chain */
	struct hlist_head	owners;		/* sockets bound to this port */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) static inline struct net *ib_net(struct inet_bind_bucket *ib)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 	return read_pnet(&ib->ib_net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 
/* Iterate over every inet_bind_bucket (@tb) linked on @head. */
#define inet_bind_bucket_for_each(tb, head) \
	hlist_for_each_entry(tb, head, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 
/* One slot of the bind (bhash) table: a lock plus its bucket chain. */
struct inet_bind_hashbucket {
	spinlock_t		lock;	/* protects 'chain' and the buckets on it */
	struct hlist_head	chain;	/* inet_bind_buckets hashing to this slot */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) /* Sockets can be hashed in established or listening table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)  * We must use different 'nulls' end-of-chain value for all hash buckets :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)  * A socket might transition from ESTABLISH to LISTEN state without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)  * RCU grace period. A lookup in ehash table needs to handle this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)  */
/* Base of the 'nulls' end-of-list markers used for listening chains, so a
 * lookup can tell a listen chain terminator from an ehash one (see above).
 */
#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
	spinlock_t		lock;	/* protects count and the chain */
	unsigned int		count;	/* number of sockets in this bucket */
	union {
		/* NOTE(review): head appears to serve lhash2 and nulls_head
		 * the port-only listening_hash — confirm against hash code.
		 */
		struct hlist_head	head;
		struct hlist_nulls_head	nulls_head;
	};
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) /* This is for listening sockets, thus all sockets which possess wildcards. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) #define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 
/* The three TCP/DCCP socket hash tables: established (ehash), local port
 * bindings (bhash) and listeners (listening_hash / lhash2).
 */
struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 */
	struct inet_ehash_bucket	*ehash;		/* established table */
	spinlock_t			*ehash_locks;	/* locks shared by groups of ehash buckets */
	unsigned int			ehash_mask;	/* number of ehash buckets - 1 */
	unsigned int			ehash_locks_mask; /* number of ehash_locks - 1 */

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct kmem_cache		*bind_bucket_cachep; /* allocator for struct inet_bind_bucket */
	struct inet_bind_hashbucket	*bhash;		/* bind (local port) table */
	unsigned int			bhash_size;	/* number of bhash buckets */

	/* The 2nd listener table hashed by local port and address */
	unsigned int			lhash2_mask;	/* number of lhash2 buckets - 1 */
	struct inet_listen_hashbucket	*lhash2;

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * might be often dirty.
	 */
	/* All sockets in TCP_LISTEN state will be in listening_hash.
	 * This is the only table where wildcard'd TCP sockets can
	 * exist.  listening_hash is only hashed by local port number.
	 * If lhash2 is initialized, the same socket will also be hashed
	 * to lhash2 by port and address.
	 */
	struct inet_listen_hashbucket	listening_hash[INET_LHTABLE_SIZE]
					____cacheline_aligned_in_smp;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 
/* RCU-walk every inet_connection_sock (@__icsk) on an lhash2 chain. */
#define inet_lhash2_for_each_icsk_rcu(__icsk, list) \
	hlist_for_each_entry_rcu(__icsk, list, icsk_listen_portaddr_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) static inline struct inet_listen_hashbucket *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	return &h->lhash2[hash & h->lhash2_mask];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) static inline struct inet_ehash_bucket *inet_ehash_bucket(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	struct inet_hashinfo *hashinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	unsigned int hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	return &hashinfo->ehash[hash & hashinfo->ehash_mask];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) static inline spinlock_t *inet_ehash_lockp(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	struct inet_hashinfo *hashinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	unsigned int hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) static inline void inet_hashinfo2_free_mod(struct inet_hashinfo *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 	kfree(h->lhash2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 	h->lhash2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 	kvfree(hashinfo->ehash_locks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 	hashinfo->ehash_locks = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 					int dif, int sdif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 	return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 				 bound_dev_if, dif, sdif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) struct inet_bind_bucket *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 			struct inet_bind_hashbucket *head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 			const unsigned short snum, int l3mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) void inet_bind_bucket_destroy(struct kmem_cache *cachep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 			      struct inet_bind_bucket *tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 			       const u32 bhash_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	return (lport + net_hash_mix(net)) & (bhash_size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 		    const unsigned short snum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) /* These can have wildcards, don't try too hard. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) static inline u32 inet_lhashfn(const struct net *net, const unsigned short num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 	return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) static inline int inet_sk_listen_hashfn(const struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) /* Caller must disable local BH processing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) int __inet_inherit_port(const struct sock *sk, struct sock *child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) void inet_put_port(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) void inet_hashinfo_init(struct inet_hashinfo *h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 			 unsigned long numentries, int scale,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 			 unsigned long low_limit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 			 unsigned long high_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) int inet_hashinfo2_init_mod(struct inet_hashinfo *h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 			 bool *found_dup_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) int __inet_hash(struct sock *sk, struct sock *osk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) int inet_hash(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) void inet_unhash(struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) struct sock *__inet_lookup_listener(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 				    struct inet_hashinfo *hashinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 				    struct sk_buff *skb, int doff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 				    const __be32 saddr, const __be16 sport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 				    const __be32 daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 				    const unsigned short hnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 				    const int dif, const int sdif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) static inline struct sock *inet_lookup_listener(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 		struct inet_hashinfo *hashinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 		struct sk_buff *skb, int doff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 		__be32 saddr, __be16 sport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 		__be32 daddr, __be16 dport, int dif, int sdif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 				      daddr, ntohs(dport), dif, sdif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) /* Socket demux engine toys. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) /* What happens here is ugly; there's a pair of adjacent fields in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)    struct inet_sock; __be16 dport followed by __u16 num.  We want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)    search by pair, so we combine the keys into a single 32bit value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)    and compare with 32bit value read from &...->dport.  Let's at least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)    make sure that it's not mixed with anything else...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)    On 64bit targets we combine comparisons with pair of adjacent __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)    fields in the same way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) */
#ifdef __BIG_ENDIAN
/* Combine (sport, dport) into one 32bit value laid out like the adjacent
 * dport/num fields of struct inet_sock (see the comment above).
 */
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 
#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
/* Pack (saddr, daddr) into a single 64bit cookie matching the in-memory
 * layout of the adjacent sk_daddr/sk_rcv_saddr pair (see comment above),
 * so the address comparison is one 64bit compare.
 */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__saddr)) << 32) | \
				   ((__force __u64)(__be32)(__daddr)))
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__daddr)) << 32) | \
				   ((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */
/* True if __sk matches the port pair, address cookie, bound device
 * (either __dif or __sdif) and network namespace.
 */
#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
	(((__sk)->sk_portpair == (__ports))			&&	\
	 ((__sk)->sk_addrpair == (__cookie))			&&	\
	 (((__sk)->sk_bound_dev_if == (__dif))			||	\
	  ((__sk)->sk_bound_dev_if == (__sdif)))		&&	\
	 net_eq(sock_net(__sk), (__net)))
#else /* 32-bit arch */
/* On 32bit the cookie is unused (addresses are compared individually);
 * this stub keeps callers' declarations compiling.
 */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const int __name __deprecated __attribute__((unused))

#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
	(((__sk)->sk_portpair == (__ports))		&&		\
	 ((__sk)->sk_daddr	== (__saddr))		&&		\
	 ((__sk)->sk_rcv_saddr	== (__daddr))		&&		\
	 (((__sk)->sk_bound_dev_if == (__dif))		||		\
	  ((__sk)->sk_bound_dev_if == (__sdif)))	&&		\
	 net_eq(sock_net(__sk), (__net)))
#endif /* 64-bit arch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) /* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)  * not check it for lookups anymore, thanks Alexey. -DaveM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) struct sock *__inet_lookup_established(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 				       struct inet_hashinfo *hashinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 				       const __be32 saddr, const __be16 sport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 				       const __be32 daddr, const u16 hnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 				       const int dif, const int sdif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) static inline struct sock *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 	inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 				const __be32 saddr, const __be16 sport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 				const __be32 daddr, const __be16 dport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 				const int dif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 	return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 					 ntohs(dport), dif, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) static inline struct sock *__inet_lookup(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 					 struct inet_hashinfo *hashinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 					 struct sk_buff *skb, int doff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 					 const __be32 saddr, const __be16 sport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 					 const __be32 daddr, const __be16 dport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 					 const int dif, const int sdif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 					 bool *refcounted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	u16 hnum = ntohs(dport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 	struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 	sk = __inet_lookup_established(net, hashinfo, saddr, sport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 				       daddr, hnum, dif, sdif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 	*refcounted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 	if (sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 		return sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 	*refcounted = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 				      sport, daddr, hnum, dif, sdif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) static inline struct sock *inet_lookup(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 				       struct inet_hashinfo *hashinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 				       struct sk_buff *skb, int doff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 				       const __be32 saddr, const __be16 sport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 				       const __be32 daddr, const __be16 dport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 				       const int dif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 	struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 	bool refcounted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 	sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 			   dport, dif, 0, &refcounted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 	if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 		sk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 	return sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 					     struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 					     int doff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 					     const __be16 sport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 					     const __be16 dport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 					     const int sdif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 					     bool *refcounted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 	struct sock *sk = skb_steal_sock(skb, refcounted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 	const struct iphdr *iph = ip_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 	if (sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 		return sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 	return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 			     doff, iph->saddr, sport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 			     iph->daddr, dport, inet_iif(skb), sdif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 			     refcounted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) u32 inet6_ehashfn(const struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 		  const struct in6_addr *laddr, const u16 lport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 		  const struct in6_addr *faddr, const __be16 fport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 
/* Set the socket's IPv4 destination address, mirroring it into the
 * v4-mapped IPv6 form when IPv6 is enabled.
 */
static inline void sk_daddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_daddr = addr; /* alias of inet_daddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 
/* Set the socket's IPv4 bound/receive address, mirroring it into the
 * v4-mapped IPv6 form when IPv6 is enabled.
 */
static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) int __inet_hash_connect(struct inet_timewait_death_row *death_row,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 			struct sock *sk, u32 port_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 			int (*check_established)(struct inet_timewait_death_row *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 						 struct sock *, __u16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 						 struct inet_timewait_sock **));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) int inet_hash_connect(struct inet_timewait_death_row *death_row,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 		      struct sock *sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) #endif /* _INET_HASHTABLES_H */