// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
#include <net/addrconf.h>

#if IS_ENABLED(CONFIG_IPV6)
/* match_sk*_wildcard == true:  IPV6_ADDR_ANY matches any IPv6 address
 *				if the socket is IPv6-only, and additionally
 *				any IPv4 address if it is not IPv6-only
 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 *				IPV6_ADDR_ANY only matches IPV6_ADDR_ANY,
 *				and 0.0.0.0 only matches 0.0.0.0
 */
static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
				 const struct in6_addr *sk2_rcv_saddr6,
				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk1_ipv6only, bool sk2_ipv6only,
				 bool match_sk1_wildcard,
				 bool match_sk2_wildcard)
{
	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;

	/* if both are mapped, treat as IPv4 */
	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
		if (!sk2_ipv6only) {
			if (sk1_rcv_saddr == sk2_rcv_saddr)
				return true;
			return (match_sk1_wildcard && !sk1_rcv_saddr) ||
				(match_sk2_wildcard && !sk2_rcv_saddr);
		}
		return false;
	}

	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
		return true;

	if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
		return true;

	if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
		return true;

	if (sk2_rcv_saddr6 &&
	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
		return true;

	return false;
}
#endif

/* match_sk*_wildcard == true:  0.0.0.0 matches any IPv4 address
 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 *				0.0.0.0 only matches 0.0.0.0
 */
static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk2_ipv6only, bool match_sk1_wildcard,
				 bool match_sk2_wildcard)
{
	if (!sk2_ipv6only) {
		if (sk1_rcv_saddr == sk2_rcv_saddr)
			return true;
		return (match_sk1_wildcard && !sk1_rcv_saddr) ||
			(match_sk2_wildcard && !sk2_rcv_saddr);
	}
	return false;
}
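
/* A few illustrative cases for the helpers above (address values made
 * up for this comment):
 *
 *	sk1 = 0.0.0.0, sk2 = 10.0.0.1, match_sk1_wildcard = true
 *		-> true: the wildcard on sk1 may match anything
 *	sk1 = 0.0.0.0, sk2 = 10.0.0.1, both match flags false
 *		-> false: without wildcard matching, 0.0.0.0 only equals
 *		   0.0.0.0
 *	sk2_ipv6only = true, any IPv4 sk1
 *		-> false: an IPv6-only socket never matches an IPv4 address
 */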

bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
			  bool match_wildcard)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
					    inet6_rcv_saddr(sk2),
					    sk->sk_rcv_saddr,
					    sk2->sk_rcv_saddr,
					    ipv6_only_sock(sk),
					    ipv6_only_sock(sk2),
					    match_wildcard,
					    match_wildcard);
#endif
	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
				    ipv6_only_sock(sk2), match_wildcard,
				    match_wildcard);
}
EXPORT_SYMBOL(inet_rcv_saddr_equal);

bool inet_rcv_saddr_any(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
#endif
	return !sk->sk_rcv_saddr;
}

void inet_get_local_port_range(struct net *net, int *low, int *high)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);

		*low = net->ipv4.ip_local_ports.range[0];
		*high = net->ipv4.ip_local_ports.range[1];
	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
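
/* The write side lives in the net.ipv4.ip_local_port_range sysctl
 * handler; roughly (a sketch, see sysctl_net_ipv4.c for the real one):
 *
 *	write_seqlock_bh(&net->ipv4.ip_local_ports.lock);
 *	net->ipv4.ip_local_ports.range[0] = low;
 *	net->ipv4.ip_local_ports.range[1] = high;
 *	write_sequnlock_bh(&net->ipv4.ip_local_ports.lock);
 *
 * The read_seqbegin()/read_seqretry() loop above re-reads whenever a
 * writer intervened, so callers never see a torn low/high pair.
 */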

static int inet_csk_bind_conflict(const struct sock *sk,
				  const struct inet_bind_bucket *tb,
				  bool relax, bool reuseport_ok)
{
	struct sock *sk2;
	bool reuse = sk->sk_reuse;
	bool reuseport = !!sk->sk_reuseport;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, &tb->owners) {
		if (sk != sk2 &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if (reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN) {
				if ((!relax ||
				     (!reuseport_ok &&
				      reuseport && sk2->sk_reuseport &&
				      !rcu_access_pointer(sk->sk_reuseport_cb) &&
				      (sk2->sk_state == TCP_TIME_WAIT ||
				       uid_eq(uid, sock_i_uid(sk2))))) &&
				    inet_rcv_saddr_equal(sk, sk2, true))
					break;
			} else if (!reuseport_ok ||
				   !reuseport || !sk2->sk_reuseport ||
				   rcu_access_pointer(sk->sk_reuseport_cb) ||
				   (sk2->sk_state != TCP_TIME_WAIT &&
				    !uid_eq(uid, sock_i_uid(sk2)))) {
				if (inet_rcv_saddr_equal(sk, sk2, true))
					break;
			}
		}
	}
	return sk2 != NULL;
}
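
/* Userspace sketch of the rules above (illustrative program, not part
 * of the kernel; the port number is made up): two listeners can share
 * a port only when both set SO_REUSEPORT before bind() and, per the
 * uid check above, belong to the same effective user:
 *
 *	struct sockaddr_in a = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(8080),
 *		.sin_addr.s_addr = htonl(INADDR_ANY),
 *	};
 *	int one = 1;
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *	listen(fd, 128);
 *
 * A second socket repeating this succeeds; without SO_REUSEPORT its
 * bind() fails with EADDRINUSE because the scan above finds a conflict.
 */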

/*
 * Find an open port number for the socket.  Returns with the
 * inet_bind_hashbucket lock held.
 */
static struct inet_bind_hashbucket *
inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *port_ret)
{
	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
	int port = 0;
	struct inet_bind_hashbucket *head;
	struct net *net = sock_net(sk);
	bool relax = false;
	int i, low, high, attempt_half;
	struct inet_bind_bucket *tb;
	u32 remaining, offset;
	int l3mdev;

	l3mdev = inet_sk_bound_l3mdev(sk);
ports_exhausted:
	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
	inet_get_local_port_range(net, &low, &high);
	high++; /* [32768, 60999] -> [32768, 61000[ */
	if (high - low < 4)
		attempt_half = 0;
	if (attempt_half) {
		int half = low + (((high - low) >> 2) << 1);

		if (attempt_half == 1)
			high = half;
		else
			low = half;
	}
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;

	offset = prandom_u32() % remaining;
	/* __inet_hash_connect() favors ports having @low parity.
	 * We do the opposite here to avoid polluting the pool used by
	 * connect() users.
	 */
	offset |= 1U;

other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
			    tb->port == port) {
				if (!inet_csk_bind_conflict(sk, tb, relax, false))
					goto success;
				goto next_port;
			}
		tb = NULL;
		goto success;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	offset--;
	if (!(offset & 1))
		goto other_parity_scan;

	if (attempt_half == 1) {
		/* OK we now try the upper half of the range */
		attempt_half = 2;
		goto other_half_scan;
	}

	if (net->ipv4.sysctl_ip_autobind_reuse && !relax) {
		/* We still have a chance to connect to different destinations */
		relax = true;
		goto ports_exhausted;
	}
	return NULL;
success:
	*port_ret = port;
	*tb_ret = tb;
	return head;
}
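
/* Worked example of the parity scan above (numbers assumed, using the
 * default range): low = 32768, high = 61000 after the ++, so
 * remaining = 28232 (already even).  offset is forced odd, hence the
 * first sweep visits only odd ports:
 *
 *	offset = prandom_u32() % 28232;		say 1000
 *	offset |= 1U;				now 1001
 *	port = 32768 + 1001 = 33769, then 33771, 33773, ...
 *
 * Once the odd sweep is exhausted, offset-- makes it even and the even
 * ports are scanned, the mirror image of __inet_hash_connect().
 */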

static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
				     struct sock *sk)
{
	kuid_t uid = sock_i_uid(sk);

	if (tb->fastreuseport <= 0)
		return 0;
	if (!sk->sk_reuseport)
		return 0;
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		return 0;
	if (!uid_eq(tb->fastuid, uid))
		return 0;
	/* We only need to check the rcv_saddr if this tb was once marked
	 * without fastreuseport and then was reset, as we can only know that
	 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
	 * owners list.
	 */
	if (tb->fastreuseport == FASTREUSEPORT_ANY)
		return 1;
#if IS_ENABLED(CONFIG_IPV6)
	if (tb->fast_sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
					    inet6_rcv_saddr(sk),
					    tb->fast_rcv_saddr,
					    sk->sk_rcv_saddr,
					    tb->fast_ipv6_only,
					    ipv6_only_sock(sk), true, false);
#endif
	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
				    ipv6_only_sock(sk), true, false);
}

void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
			       struct sock *sk)
{
	kuid_t uid = sock_i_uid(sk);
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;

	if (hlist_empty(&tb->owners)) {
		tb->fastreuse = reuse;
		if (sk->sk_reuseport) {
			tb->fastreuseport = FASTREUSEPORT_ANY;
			tb->fastuid = uid;
			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
			tb->fast_ipv6_only = ipv6_only_sock(sk);
			tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
		} else {
			tb->fastreuseport = 0;
		}
	} else {
		if (!reuse)
			tb->fastreuse = 0;
		if (sk->sk_reuseport) {
			/* We didn't match or we don't have fastreuseport set on
			 * the tb, but we have sk_reuseport set on this socket
			 * and we know that there are no bind conflicts with
			 * this socket in this tb, so reset our tb's reuseport
			 * settings so that any subsequent sockets that match
			 * our current socket will be put on the fast path.
			 *
			 * If we reset we need to set FASTREUSEPORT_STRICT so we
			 * do extra checking for all subsequent sk_reuseport
			 * socks.
			 */
			if (!sk_reuseport_match(tb, sk)) {
				tb->fastreuseport = FASTREUSEPORT_STRICT;
				tb->fastuid = uid;
				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
				tb->fast_ipv6_only = ipv6_only_sock(sk);
				tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
			}
		} else {
			tb->fastreuseport = 0;
		}
	}
}
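
/* Scenario sketch for the transitions above (socket names invented):
 *
 *	sk_A: SO_REUSEPORT, uid 1000, first bind on the port (tb empty)
 *		-> fastreuseport = FASTREUSEPORT_ANY, fastuid = 1000
 *	sk_B: SO_REUSEPORT, uid 1000
 *		-> sk_reuseport_match() returns 1; no full conflict scan
 *	sk_C: SO_REUSEPORT but sk_reuseport_match() fails (e.g. the tb
 *	      had fastreuseport cleared earlier); if the full scan in
 *	      inet_csk_bind_conflict() still passes, the tb is re-marked
 *	      FASTREUSEPORT_STRICT and later binds get the extra
 *	      rcv_saddr checks.
 */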

/* Obtain a reference to a local port for the given sock;
 * if snum is zero, select any available local port.
 * We try to allocate an odd port (and leave even ports for connect()).
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
	int ret = 1, port = snum;
	struct inet_bind_hashbucket *head;
	struct net *net = sock_net(sk);
	struct inet_bind_bucket *tb = NULL;
	int l3mdev;

	l3mdev = inet_sk_bound_l3mdev(sk);

	if (!port) {
		head = inet_csk_find_open_port(sk, &tb, &port);
		if (!head)
			return ret;
		if (!tb)
			goto tb_not_found;
		goto success;
	}
	head = &hinfo->bhash[inet_bhashfn(net, port,
					  hinfo->bhash_size)];
	spin_lock_bh(&head->lock);
	inet_bind_bucket_for_each(tb, &head->chain)
		if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
		    tb->port == port)
			goto tb_found;
tb_not_found:
	tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
				     net, head, port, l3mdev);
	if (!tb)
		goto fail_unlock;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse == SK_FORCE_REUSE)
			goto success;

		if ((tb->fastreuse > 0 && reuse) ||
		    sk_reuseport_match(tb, sk))
			goto success;
		if (inet_csk_bind_conflict(sk, tb, true, true))
			goto fail_unlock;
	}
success:
	inet_csk_update_fastreuse(tb, sk);

	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, port);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock_bh(&head->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);
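
/* Userspace view (illustrative snippet, not kernel code): binding port
 * 0 takes the inet_csk_find_open_port() path and an ephemeral
 * (preferably odd) port is chosen; getsockname() reveals it:
 *
 *	struct sockaddr_in a = { .sin_family = AF_INET };
 *	socklen_t len = sizeof(a);
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *	getsockname(fd, (struct sockaddr *)&a, &len);
 *
 * ntohs(a.sin_port) now holds the port this function picked.
 */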

/*
 * Wait for an incoming connection, avoid race conditions.  This must be
 * called with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		sched_annotate_sleep();
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *req;
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non-blocking socket, don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue, sk);
	newsk = req->sk;

	if (sk->sk_protocol == IPPROTO_TCP &&
	    tcp_rsk(req)->tfo_listener) {
		spin_lock_bh(&queue->fastopenq.lock);
		if (tcp_rsk(req)->tfo_listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq.lock);
	}

out:
	release_sock(sk);
	if (newsk && mem_cgroup_sockets_enabled) {
		int amt;

		/* atomically get the memory usage, set and charge the
		 * newsk->sk_memcg.
		 */
		lock_sock(newsk);

		/* The socket has not been accepted yet, no need to look at
		 * newsk->sk_wmem_queued.
		 */
		amt = sk_mem_pages(newsk->sk_forward_alloc +
				   atomic_read(&newsk->sk_rmem_alloc));
		mem_cgroup_sk_alloc(newsk);
		if (newsk->sk_memcg && amt)
			mem_cgroup_charge_skmem(newsk->sk_memcg, amt);

		release_sock(newsk);
	}
	if (req)
		reqsk_put(req);
	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);
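
/* Userspace counterpart (illustrative): a blocking accept() sleeps in
 * inet_csk_wait_for_connect() above, while O_NONBLOCK turns the
 * empty-queue case into an immediate -EAGAIN:
 *
 *	int c = accept(listen_fd, NULL, NULL);
 *
 *	if (c < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
 *		the queue was empty on a non-blocking listener
 *
 * A receive timeout set with SO_RCVTIMEO bounds the sleep, via the
 * sock_rcvtimeo() call above.
 */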

/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire
 * jiffies to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(struct timer_list *t),
			       void (*delack_handler)(struct timer_list *t),
			       void (*keepalive_handler)(struct timer_list *t))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
	timer_setup(&sk->sk_timer, keepalive_handler, 0);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
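
/* The keepalive timer is typically armed from userspace (sketch, the
 * idle value is illustrative):
 *
 *	int on = 1, idle = 60;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *
 * tcp_set_keepalive() then ends up in inet_csk_reset_keepalive_timer()
 * with the configured idle time.
 */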

struct dst_entry *inet_csk_route_req(const struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct ip_options_rcu *opt;
	struct rtable *rt;

	rcu_read_lock();
	opt = rcu_dereference(ireq->ireq_opt);

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct flowi4 *fl4;
	struct rtable *rt;

	opt = rcu_dereference(ireq->ireq_opt);
	fl4 = &newinet->cork.fl.u.ip4;

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

/* Decide when to expire the request and when to resend SYN-ACK */
static void syn_ack_recalc(struct request_sock *req,
			   const int max_syn_ack_retries,
			   const u8 rskq_defer_accept,
			   int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= max_syn_ack_retries;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= max_syn_ack_retries &&
		  (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept);
	/* Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}
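
/* rskq_defer_accept is driven by the TCP_DEFER_ACCEPT socket option;
 * userspace sketch (the seconds value is illustrative):
 *
 *	int secs = 5;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs));
 *
 * With it set, a connection whose handshake completed but which has
 * sent no data is withheld from accept(); the logic above keeps the
 * request alive, without resending SYN-ACK, until the deferring
 * period runs out.
 */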

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);

/* return true if req was found in the ehash table */
static bool reqsk_queue_unlink(struct request_sock *req)
{
	struct inet_hashinfo *hashinfo = req_to_sk(req)->sk_prot->h.hashinfo;
	bool found = false;

	if (sk_hashed(req_to_sk(req))) {
		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);

		spin_lock(lock);
		found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
		spin_unlock(lock);
	}
	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
		reqsk_put(req);
	return found;
}

bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
	bool unlinked = reqsk_queue_unlink(req);

	if (unlinked) {
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		reqsk_put(req);
	}
	return unlinked;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);

void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
{
	inet_csk_reqsk_queue_drop(sk, req);
	reqsk_put(req);
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);

static void reqsk_timer_handler(struct timer_list *t)
{
	struct request_sock *req = from_timer(req, t, rsk_timer);
	struct sock *sk_listener = req->rsk_listener;
	struct net *net = sock_net(sk_listener);
	struct inet_connection_sock *icsk = inet_csk(sk_listener);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	int max_syn_ack_retries, qlen, expire = 0, resend = 0;

	if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
		goto drop;

	max_syn_ack_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) by the first timeout.
	 * If the synack was not acknowledged for 1 second, it means
	 * one of the following: the synack was lost, the ack was lost,
	 * the rtt is high, or nobody planned to ack (i.e. synflood).
	 * When the server is a bit loaded, the queue is populated with
	 * old open requests, reducing the effective size of the queue.
	 * When the server is well loaded, the queue size reduces to zero
	 * after several minutes of work. This is not a synflood, it is
	 * normal operation. The solution is pruning too-old entries,
	 * overriding the normal timeout, when the situation becomes
	 * dangerous.
	 *
	 * Essentially, we reserve half of the room for young
	 * embryos and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	qlen = reqsk_queue_len(queue);
	if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
		int young = reqsk_queue_len_young(queue) << 1;

		while (max_syn_ack_retries > 2) {
			if (qlen < young)
				break;
			max_syn_ack_retries--;
			young <<= 1;
		}
	}
	syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
		       &expire, &resend);
	req->rsk_ops->syn_ack_timeout(req);
	if (!expire &&
	    (!resend ||
	     !inet_rtx_syn_ack(sk_listener, req) ||
	     inet_rsk(req)->acked)) {
		unsigned long timeo;

		if (req->num_timeout++ == 0)
			atomic_dec(&queue->young);
		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
		mod_timer(&req->rsk_timer, jiffies + timeo);
		return;
	}
drop:
	inet_csk_reqsk_queue_drop_and_put(sk_listener, req);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) static void reqsk_queue_hash_req(struct request_sock *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) unsigned long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) mod_timer(&req->rsk_timer, jiffies + timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) inet_ehash_insert(req_to_sk(req), NULL, NULL);
	/* Before letting lookups find us, make sure all req fields
	 * are committed to memory and the refcnt is initialized.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) smp_wmb();
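	/* Believed breakdown of the "2 + 1": one reference owned by the
	 * ehash chain, one by the rsk_timer, plus one transient reference
	 * for the caller to drop once it is done with req.
	 */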
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) refcount_set(&req->rsk_refcnt, 2 + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) unsigned long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) reqsk_queue_hash_req(req, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) inet_csk_reqsk_queue_added(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
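/* A minimal caller sketch (illustrative, not from this file): a protocol's
 * connection-request path publishes a non-cookie request and arms its SYNACK
 * timer in one call, roughly
 *
 *	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
 *
 * and then transmits the SYNACK. The TCP_TIMEOUT_INIT timeout value is an
 * assumption here; callers may derive their own.
 */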
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
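/* If the listener has a ULP attached and that ULP provides a clone hook,
 * give it the chance to duplicate its per-socket state onto the child:
 * sk_clone_lock() copied the icsk_ulp_ops pointer but none of the state
 * behind it.
 */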
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) const gfp_t priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) struct inet_connection_sock *icsk = inet_csk(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (!icsk->icsk_ulp_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (icsk->icsk_ulp_ops->clone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) icsk->icsk_ulp_ops->clone(req, newsk, priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * inet_csk_clone_lock - clone an inet socket, and lock its clone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * @sk: the socket to clone
 * @req: the request_sock the new socket is created for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) struct sock *inet_csk_clone_lock(const struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) const struct request_sock *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) const gfp_t priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) struct sock *newsk = sk_clone_lock(sk, priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (newsk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct inet_connection_sock *newicsk = inet_csk(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) inet_sk_set_state(newsk, TCP_SYN_RECV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) newicsk->icsk_bind_hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) /* listeners have SOCK_RCU_FREE, not the children */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) sock_reset_flag(newsk, SOCK_RCU_FREE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) inet_sk(newsk)->mc_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) newsk->sk_mark = inet_rsk(req)->ir_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) atomic64_set(&newsk->sk_cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) atomic64_read(&inet_rsk(req)->ir_cookie));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) newicsk->icsk_retransmits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) newicsk->icsk_backoff = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) newicsk->icsk_probes_out = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) newicsk->icsk_probes_tstamp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) /* Deinitialize accept_queue to trap illegal accesses. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) inet_clone_ulp(req, newsk, priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) security_inet_csk_clone(newsk, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return newsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
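/* A minimal caller sketch (illustrative): tcp_create_openreq_child() wraps
 * this roughly as
 *
 *	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
 *
 * followed by protocol-specific initialisation of newsk; every error path
 * afterwards must still bh_unlock_sock(newsk), as the kernel-doc above says.
 */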
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * At this point, there should be no process reference to this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * socket, and thus no user references at all. Therefore we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * can assume the socket waitqueue is inactive and nobody will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * try to jump onto it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) void inet_csk_destroy_sock(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) WARN_ON(sk->sk_state != TCP_CLOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) WARN_ON(!sock_flag(sk, SOCK_DEAD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
	/* It cannot be in a hash table! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) WARN_ON(!sk_unhashed(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
	/* If inet_sk(sk)->inet_num is not 0, the socket must be bound */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) sk->sk_prot->destroy(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) sk_stream_kill_queues(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) xfrm_sk_free_policy(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) sk_refcnt_debug_release(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) percpu_counter_dec(sk->sk_prot->orphan_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) EXPORT_SYMBOL(inet_csk_destroy_sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
/* This function forces the closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) void inet_csk_prepare_forced_close(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) __releases(&sk->sk_lock.slock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) /* sk_clone_lock locked the socket and set refcnt to 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) bh_unlock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) inet_csk_prepare_for_destroy_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) inet_sk(sk)->inet_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) EXPORT_SYMBOL(inet_csk_prepare_forced_close);
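/* A typical use, sketched from the TCP error path (an assumption, not part
 * of this file): when inheriting the port or hashing a freshly cloned child
 * fails,
 *
 *	inet_csk_prepare_forced_close(newsk);
 *	tcp_done(newsk);
 *
 * drops the clone's lock and references before discarding it.
 */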
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) int inet_csk_listen_start(struct sock *sk, int backlog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) struct inet_connection_sock *icsk = inet_csk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct inet_sock *inet = inet_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) int err = -EADDRINUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) reqsk_queue_alloc(&icsk->icsk_accept_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) sk->sk_ack_backlog = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) inet_csk_delack_init(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
	/* There is a race window here: we announce ourselves as listening,
	 * but this transition has not yet been validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) inet_sk_state_store(sk, TCP_LISTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) inet->inet_sport = htons(inet->inet_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) sk_dst_reset(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) err = sk->sk_prot->hash(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (likely(!err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) inet_sk_set_state(sk, TCP_CLOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) EXPORT_SYMBOL_GPL(inet_csk_listen_start);
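/* A caller sketch (illustrative, following inet_listen()): the socket-level
 * listen() handler stores the backlog and then starts listening, roughly
 *
 *	WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
 *	err = inet_csk_listen_start(sk, backlog);
 *
 * Note that the backlog argument is unused above: this function relies on
 * the caller having stored it in sk_max_ack_backlog.
 */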
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static void inet_child_forget(struct sock *sk, struct request_sock *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) struct sock *child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) sk->sk_prot->disconnect(child, O_NONBLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) sock_orphan(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) percpu_counter_inc(sk->sk_prot->orphan_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) BUG_ON(sk != req->rsk_listener);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
		/* Paranoid: prevent a race condition in case an inbound
		 * packet destined for the child is blocked by the socket
		 * lock in tcp_v4_rcv().
		 * Also satisfies an assertion in
		 * tcp_v4_destroy_sock().
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) inet_csk_destroy_sock(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) struct request_sock *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct sock *child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) spin_lock(&queue->rskq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (unlikely(sk->sk_state != TCP_LISTEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) inet_child_forget(sk, req, child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) child = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) req->sk = child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) req->dl_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (queue->rskq_accept_head == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) WRITE_ONCE(queue->rskq_accept_head, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) queue->rskq_accept_tail->dl_next = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) queue->rskq_accept_tail = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) sk_acceptq_added(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) spin_unlock(&queue->rskq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) return child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
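/* own_req is true when the caller's child won the race to replace the
 * request socket in the established hash (see inet_ehash_nolisten()). The
 * winner retires the request from its timer and queues the child for
 * accept(); a loser must dispose of the child it cloned, because another
 * child already owns the request.
 */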
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) struct request_sock *req, bool own_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (own_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) inet_csk_reqsk_queue_drop(sk, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (inet_csk_reqsk_queue_add(sk, req, child))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) return child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) /* Too bad, another child took ownership of the request, undo. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) bh_unlock_sock(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) sock_put(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) EXPORT_SYMBOL(inet_csk_complete_hashdance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * This routine closes sockets which have been at least partially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * opened, but not yet accepted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) void inet_csk_listen_stop(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct inet_connection_sock *icsk = inet_csk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct request_sock_queue *queue = &icsk->icsk_accept_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) struct request_sock *next, *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
	/* Following the specs, it would be better either to send a FIN
	 * (and enter FIN-WAIT-1; that is the normal close)
	 * or to send an active reset (abort).
	 * Certainly, that is pretty dangerous during a synflood, but it is
	 * a bad justification for our negligence 8)
	 * To be honest, we are not able to implement either
	 * of the variants now. --ANK
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) struct sock *child = req->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) local_bh_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) bh_lock_sock(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) WARN_ON(sock_owned_by_user(child));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) sock_hold(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) inet_child_forget(sk, req, child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) reqsk_put(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) bh_unlock_sock(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) sock_put(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (queue->fastopenq.rskq_rst_head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) /* Free all the reqs queued in rskq_rst_head. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) spin_lock_bh(&queue->fastopenq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) req = queue->fastopenq.rskq_rst_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) queue->fastopenq.rskq_rst_head = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) spin_unlock_bh(&queue->fastopenq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) while (req != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) next = req->dl_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) reqsk_put(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) req = next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) WARN_ON_ONCE(sk->sk_ack_backlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) const struct inet_sock *inet = inet_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) sin->sin_family = AF_INET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) sin->sin_addr.s_addr = inet->inet_daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) sin->sin_port = inet->inet_dport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) const struct inet_sock *inet = inet_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) const struct ip_options_rcu *inet_opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) __be32 daddr = inet->inet_daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) struct flowi4 *fl4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) struct rtable *rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) inet_opt = rcu_dereference(inet->inet_opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (inet_opt && inet_opt->opt.srr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) daddr = inet_opt->opt.faddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) fl4 = &fl->u.ip4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) inet->inet_saddr, inet->inet_dport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) inet->inet_sport, sk->sk_protocol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (IS_ERR(rt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) rt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (rt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) sk_setup_caps(sk, &rt->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
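	/* If the lookup failed, rt is NULL here and &rt->dst is NULL too:
	 * this relies on dst being the first member of struct rtable, so
	 * callers only ever see a valid dst or NULL.
	 */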
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) return &rt->dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) struct dst_entry *dst = __sk_dst_check(sk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) struct inet_sock *inet = inet_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (!dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (!dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
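	/* The PMTU update may have invalidated and dropped the cached
	 * route; look it up again and rebuild it if necessary.
	 */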
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) dst = __sk_dst_check(sk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (!dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);