// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 */

/*
 * Changes:
 *	David S. Miller	:	New socket lookup architecture.
 *				This code is dedicated to John Dyson.
 *	David S. Miller :	Change semantics of established hash,
 *				half is devoted to TIME_WAIT sockets
 *				and the rest go in the other half.
 *	Andi Kleen :		Add support for syncookies and fixed
 *				some bugs: ip options weren't passed to
 *				the TCP layer, missed a check for an
 *				ACK bit.
 *	Andi Kleen :		Implemented fast path mtu discovery.
 *				Fixed many serious bugs in the
 *				request_sock handling and moved
 *				most of it into the af independent code.
 *				Added tail drop and some other bugfixes.
 *				Added new listen semantics.
 *	Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:	ip_dynaddr bits
 *	Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after a year
 *					in a coma.
 *	Andi Kleen	:	Fix new listen.
 *	Andi Kleen	:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>
#include <linux/btf_ids.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

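/* Pick the initial sequence number for a passively opened connection,
 * derived from the addresses and ports of the incoming SYN in @skb.
 */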
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

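/* Per-flow timestamp offset, so that TCP timestamps do not expose a single
 * global clock that could be correlated across connections.
 */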
static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}

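/* Decide whether a TIME-WAIT socket may be reused for a new outgoing
 * connection on the same 4-tuple.  Returns 1 (and takes a reference on
 * @sktw) if reuse is safe, 0 otherwise.
 */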
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct inet_timewait_sock *tw = inet_twsk(sktw);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);
	int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;

	if (reuse == 2) {
		/* Still does not detect *everything* that goes through
		 * lo, since we require a loopback src or dst address
		 * or a direct binding to the 'lo' interface.
		 */
		bool loopback = false;

		if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
			loopback = true;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
			    ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
				loopback = true;
		} else
#endif
		{
			if (ipv4_is_loopback(tw->tw_daddr) ||
			    ipv4_is_loopback(tw->tw_rcv_saddr))
				loopback = true;
		}
		if (!loopback)
			reuse = 0;
	}

	/* With PAWS, it is safe from the viewpoint of data integrity.
	 * Even without PAWS it is safe provided the sequence spaces do
	 * not overlap, i.e. at data rates <= 80 Mbit/sec.
	 *
	 * The idea is close to VJ's: the timestamp cache is held not
	 * per host but per port pair, and the TW bucket is used as the
	 * state holder.
	 *
	 * If the TW bucket has already been destroyed, we fall back to
	 * VJ's scheme and use the initial timestamp retrieved from the
	 * peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (reuse && time_after32(ktime_get_seconds(),
					    tcptw->tw_ts_recent_stamp)))) {
		/* In case of repair and re-using TIME-WAIT sockets we still
		 * want to be sure that it is safe as above, but honor the
		 * sequence numbers and time stamps set as part of the repair
		 * process.
		 *
		 * Without this check, re-using a TIME-WAIT socket with TCP
		 * repair would accumulate a -1 on the repair assigned
		 * sequence number.  The first time it is reused the sequence
		 * is -1, the second time -2, etc.  This fixes that issue
		 * without appearing to create any others.
		 */
		if (likely(!tp->repair)) {
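			/* Step well past the old incarnation's sequence
			 * space: 65535 is the largest possible window, so
			 * segments from the old connection cannot land
			 * inside the new one's receive window.
			 */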
			u32 seq = tcptw->tw_snd_nxt + 65535 + 2;

			if (!seq)
				seq = 1;
			WRITE_ONCE(tp->write_seq, seq);
			tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
			tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		}
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v4_connect() and intended to
	 * prevent the BPF program called below from accessing bytes that are
	 * out of the bounds specified by the user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
}

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			WRITE_ONCE(tp->write_seq, 0);
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set the state to SYN-SENT and, without releasing the
	 * socket lock, select a source port, enter ourselves into the hash
	 * tables and complete the initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			WRITE_ONCE(tp->write_seq,
				   secure_tcp_seq(inet->inet_saddr,
						  inet->inet_daddr,
						  inet->inet_sport,
						  usin->sin_port));
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

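	/* Seed the per-socket IP ID counter at a random point so that IP IDs
	 * are not trivially predictable across connections.
	 */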
	inet->inet_id = prandom_u32();

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);

	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if the socket was owned by the user
 * at the time tcp_v4_err() was called to handle the ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to go wrong... Remember the soft error
	 * in case this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

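/* Handle an ICMP redirect: let the cached route update its next hop for
 * this destination.
 */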
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/* TCP-LD (RFC 6069) logic */
void tcp_ld_RTO_revert(struct sock *sk, u32 seq)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	s32 remaining;
	u32 delta_us;

	if (sock_owned_by_user(sk))
		return;

	if (seq != tp->snd_una || !icsk->icsk_retransmits ||
	    !icsk->icsk_backoff)
		return;

	skb = tcp_rtx_queue_head(sk);
	if (WARN_ON_ONCE(!skb))
		return;

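	/* Undo one step of exponential backoff and recompute the RTO from
	 * the current SRTT; per RFC 6069 the ICMP unreachable suggests the
	 * loss was due to a connectivity disruption, not congestion.
	 */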
	icsk->icsk_backoff--;
	icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT;
	icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

	tcp_mstamp_refresh(tp);
	delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
	remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us);

	if (remaining > 0) {
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  remaining, TCP_RTO_MAX);
	} else {
		/* RTO revert clocked out retransmission.
		 * Will retransmit now.
		 */
		tcp_retransmit_timer(sk);
	}
}
EXPORT_SYMBOL(tcp_ld_RTO_revert);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment,
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic".  When
 * someone else accesses the socket the ICMP is just dropped,
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

int tcp_v4_err(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return -ENOENT;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
				     type == ICMP_TIME_EXCEEDED ||
				     (type == ICMP_DEST_UNREACH &&
				      (code == ICMP_NET_UNREACH ||
				       code == ICMP_HOST_UNREACH)));
		return 0;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of the PMTU discovery (RFC1191) special case:
	 * we can receive locally generated ICMP messages while the socket
	 * is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = rcu_dereference(tp->fastopen_rsk);
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always < 576 bytes,
			 * so they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

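			/* Record the new path MTU.  If the socket is owned
			 * by the user, defer the MTU update to
			 * tcp_release_cb(); otherwise apply it right away.
			 */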
			WRITE_ONCE(tp->mtu_info, info);
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* Check whether this ICMP message allows reverting the
		 * backoff (see RFC 6069).
		 */
		if (!fastopen &&
		    (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))
			tcp_ld_RTO_revert(sk, seq);
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open.  If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * RFC 1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH to be
	 * considered hard errors (well, FRAG_FAILED too, but it is obsoleted
	 * by PMTU discovery).
	 *
	 * Note that on the modern Internet, where routing is unreliable and
	 * broken firewalls sit in every dark corner sending random errors
	 * ordered by their masters, even these two messages have finally
	 * lost their original sense (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 *				--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else {	/* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}

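/* Compute the complement of the TCP pseudo-header checksum and set up
 * skb->csum_start/csum_offset so that the device (or the software fallback)
 * can fill in the full checksum later (CHECKSUM_PARTIAL).
 */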
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 *		      for the reset?
 *	Answer: if a packet caused the RST, it is not for a socket
 *		existing in our system; if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other
 *		side's TCP.  So we build the reply based only on the
 *		parameters that arrived with the segment.
 *	Exception: precedence violation.  We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	u64 transmit_time = 0;
	struct sock *ctl_sk;
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct.  prequeue might have dropped
	 * our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest = th->source;
	rep.th.source = th->dest;
	rep.th.doff = sizeof(struct tcphdr) / 4;
	rep.th.rst = 1;

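	/* Per RFC 793, the RST takes its SEQ from the offending segment's
	 * ACK field when one is present; otherwise it carries SEQ 0 and
	 * ACKs everything the offending segment occupied.
	 */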
	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		const union tcp_md5_addr *addr;
		int l3index;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and inet_iif is set to it.
		 */
		l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
		key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
	} else if (hash_location) {
		const union tcp_md5_addr *addr;
		int sdif = tcp_v4_sdif(skb);
		int dif = inet_iif(skb);
		int l3index;

		/* The active side is lost.  Try to find the listening socket
		 * through the source port, and then find the MD5 key through
		 * the listening socket.  We do not lose security here: the
		 * incoming packet is checked against the MD5 hash of the key
		 * we found, and no RST is generated if the hash doesn't
		 * match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), dif, sdif);
		/* don't send an RST if we can't find a key */
		if (!sk1)
			goto out;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and dif is set to it.
		 */
		l3index = sdif ? dif : 0;
		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
		key = tcp_md5_do_lookup(sk1, l3index, addr, AF_INET);
		if (!key)
			goto out;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When the socket is gone, all binding information is lost and
	 * routing might fail in this case.  No choice here: if we choose to
	 * force the input interface, we will misroute in the case of an
	 * asymmetric route.
	 */
	if (sk) {
		arg.bound_dev_if = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

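	/* Reading sk->sk_bound_dev_if above is valid even when @sk is a
	 * TIME-WAIT socket only because the field sits at the same offset
	 * in both structures; enforce that at build time.
	 */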
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) offsetof(struct inet_timewait_sock, tw_bound_dev_if));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) arg.tos = ip_hdr(skb)->tos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) local_bh_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (sk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) inet_twsk(sk)->tw_mark : sk->sk_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) inet_twsk(sk)->tw_priority : sk->sk_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) transmit_time = tcp_transmit_time(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) ip_send_unicast_reply(ctl_sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) skb, &TCP_SKB_CB(skb)->header.h4.opt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) &arg, arg.iov[0].iov_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) transmit_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) ctl_sk->sk_mark = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
 * outside socket context, is certainly ugly. What can I do?
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) static void tcp_v4_send_ack(const struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct sk_buff *skb, u32 seq, u32 ack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) u32 win, u32 tsval, u32 tsecr, int oif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct tcp_md5sig_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) int reply_flags, u8 tos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) const struct tcphdr *th = tcp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct tcphdr th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) + (TCPOLEN_MD5SIG_ALIGNED >> 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) } rep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct ip_reply_arg arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) struct sock *ctl_sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) u64 transmit_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) memset(&rep.th, 0, sizeof(struct tcphdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) memset(&arg, 0, sizeof(arg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) arg.iov[0].iov_base = (unsigned char *)&rep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) arg.iov[0].iov_len = sizeof(rep.th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (tsecr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) (TCPOPT_TIMESTAMP << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) TCPOLEN_TIMESTAMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) rep.opt[1] = htonl(tsval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) rep.opt[2] = htonl(tsecr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /* Swap the send and the receive. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) rep.th.dest = th->source;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) rep.th.source = th->dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) rep.th.doff = arg.iov[0].iov_len / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) rep.th.seq = htonl(seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) rep.th.ack_seq = htonl(ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) rep.th.ack = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) rep.th.window = htons(win);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) int offset = (tsecr) ? 3 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) (TCPOPT_NOP << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) (TCPOPT_MD5SIG << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) TCPOLEN_MD5SIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
rep.th.doff = arg.iov[0].iov_len / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) key, ip_hdr(skb)->saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) ip_hdr(skb)->daddr, &rep.th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) arg.flags = reply_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) ip_hdr(skb)->saddr, /* XXX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) arg.iov[0].iov_len, IPPROTO_TCP, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) arg.csumoffset = offsetof(struct tcphdr, check) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (oif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) arg.bound_dev_if = oif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) arg.tos = tos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) local_bh_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) inet_twsk(sk)->tw_mark : sk->sk_mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) inet_twsk(sk)->tw_priority : sk->sk_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) transmit_time = tcp_transmit_time(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) ip_send_unicast_reply(ctl_sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) skb, &TCP_SKB_CB(skb)->header.h4.opt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) &arg, arg.iov[0].iov_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) transmit_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) ctl_sk->sk_mark = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
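/* Answer a segment aimed at a TIME-WAIT socket: echo the sequence
 * state that was preserved in the timewait control block, then drop
 * the reference the caller passed in.
 */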
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) struct inet_timewait_sock *tw = inet_twsk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) tcp_v4_send_ack(sk, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) tcp_time_stamp_raw() + tcptw->tw_ts_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) tcptw->tw_ts_recent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) tw->tw_bound_dev_if,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) tcp_twsk_md5_key(tcptw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) tw->tw_tos
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) inet_twsk_put(tw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
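/* Answer a segment that matches a request (SYN-RECV) socket, e.g. a
 * retransmitted SYN; the ACK is built from the request_sock since no
 * full socket exists yet.
 */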
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) struct request_sock *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) const union tcp_md5_addr *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) int l3index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) tcp_sk(sk)->snd_nxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /* RFC 7323 2.3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * The window field (SEG.WND) of every outgoing segment, with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * exception of <SYN> segments, MUST be right-shifted by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * Rcv.Wind.Shift bits:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) tcp_v4_send_ack(sk, skb, seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) tcp_rsk(req)->rcv_nxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) req->ts_recent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) ip_hdr(skb)->tos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * Send a SYN-ACK after having received a SYN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * This still operates on a request_sock only, not on a big
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) struct request_sock *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct tcp_fastopen_cookie *foc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) enum tcp_synack_type synack_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) struct sk_buff *syn_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) const struct inet_request_sock *ireq = inet_rsk(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) struct flowi4 fl4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) int err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) u8 tos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /* First, grab a route. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) tos = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) (inet_sk(sk)->tos & INET_ECN_MASK) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) inet_sk(sk)->tos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (!INET_ECN_is_capable(tos) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) tcp_bpf_ca_needs_ecn((struct sock *)req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) tos |= INET_ECN_ECT_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) ireq->ir_rmt_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) rcu_dereference(ireq->ireq_opt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) tos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) err = net_xmit_eval(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * IPv4 request_sock destructor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) static void tcp_v4_reqsk_destructor(struct request_sock *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * RFC2385 MD5 checksumming requires a mapping of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * IP address->MD5 Key.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * We need to maintain these in the sk structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) EXPORT_SYMBOL(tcp_md5_needed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
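/* Preference order when several configured keys match a peer: a key
 * scoped to an L3 domain beats an unscoped one, otherwise the longer
 * (more specific) prefix wins.
 */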
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (!old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /* l3index always overrides non-l3index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (old->l3index && new->l3index == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (old->l3index == 0 && new->l3index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return old->prefixlen < new->prefixlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /* Find the Key structure for an address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) const union tcp_md5_addr *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) int family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) const struct tcp_sock *tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) struct tcp_md5sig_key *key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) const struct tcp_md5sig_info *md5sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) __be32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct tcp_md5sig_key *best_match = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) bool match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /* caller either holds rcu_read_lock() or socket lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) md5sig = rcu_dereference_check(tp->md5sig_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) lockdep_sock_is_held(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (!md5sig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) hlist_for_each_entry_rcu(key, &md5sig->head, node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) lockdep_sock_is_held(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (key->family != family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (key->l3index && key->l3index != l3index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (family == AF_INET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) mask = inet_make_mask(key->prefixlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) match = (key->addr.a4.s_addr & mask) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) (addr->a4.s_addr & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) #if IS_ENABLED(CONFIG_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) } else if (family == AF_INET6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) key->prefixlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) match = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (match && better_md5_match(best_match, key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) best_match = key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return best_match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) EXPORT_SYMBOL(__tcp_md5_do_lookup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
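/* Exact-match variant of the lookup above: address, prefix length and
 * l3index must all match exactly. Used by the add/del paths so that
 * keys differing only in scope can coexist.
 */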
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) const union tcp_md5_addr *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) int family, u8 prefixlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) int l3index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) const struct tcp_sock *tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct tcp_md5sig_key *key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) unsigned int size = sizeof(struct in_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) const struct tcp_md5sig_info *md5sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) /* caller either holds rcu_read_lock() or socket lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) md5sig = rcu_dereference_check(tp->md5sig_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) lockdep_sock_is_held(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (!md5sig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) #if IS_ENABLED(CONFIG_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (family == AF_INET6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) size = sizeof(struct in6_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) hlist_for_each_entry_rcu(key, &md5sig->head, node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) lockdep_sock_is_held(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (key->family != family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (key->l3index != l3index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (!memcmp(&key->addr, addr, size) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) key->prefixlen == prefixlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
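/* Look up the key for segments exchanged with addr_sk's peer, scoped
 * to the L3 master device (if any) that addr_sk is bound to.
 */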
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) const struct sock *addr_sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) const union tcp_md5_addr *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) int l3index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) addr_sk->sk_bound_dev_if);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) EXPORT_SYMBOL(tcp_v4_md5_lookup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /* This can be called on a newly created socket, from other files */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) int family, u8 prefixlen, int l3index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) const u8 *newkey, u8 newkeylen, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) /* Add Key to the list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) struct tcp_md5sig_key *key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) struct tcp_sock *tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) struct tcp_md5sig_info *md5sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (key) {
/* Pre-existing entry - just update that one.
 * Note that the key might be used concurrently.
 * data_race() is telling KCSAN that we do not care about
 * key mismatches, since changing the MD5 key on live flows
 * can lead to packet drops.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) data_race(memcpy(key->key, newkey, newkeylen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
/* Pairs with READ_ONCE() in tcp_md5_hash_key().
 * Also note that a reader could observe the new key->keylen value
 * but the old key->key[]; this is why we use __GFP_ZERO
 * in the sock_kmalloc() call below.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) WRITE_ONCE(key->keylen, newkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) md5sig = rcu_dereference_protected(tp->md5sig_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) lockdep_sock_is_held(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (!md5sig) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) md5sig = kmalloc(sizeof(*md5sig), gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (!md5sig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) sk_nocaps_add(sk, NETIF_F_GSO_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) INIT_HLIST_HEAD(&md5sig->head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) rcu_assign_pointer(tp->md5sig_info, md5sig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (!key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (!tcp_alloc_md5sig_pool()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) sock_kfree_s(sk, key, sizeof(*key));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) memcpy(key->key, newkey, newkeylen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) key->keylen = newkeylen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) key->family = family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) key->prefixlen = prefixlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) key->l3index = l3index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) memcpy(&key->addr, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) (family == AF_INET6) ? sizeof(struct in6_addr) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) sizeof(struct in_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) hlist_add_head_rcu(&key->node, &md5sig->head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) EXPORT_SYMBOL(tcp_md5_do_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
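/* Remove a key previously installed with tcp_md5_do_add(). Freeing is
 * RCU-deferred because lockless readers may still be walking the list.
 */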
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) u8 prefixlen, int l3index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) struct tcp_md5sig_key *key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (!key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) hlist_del_rcu(&key->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) kfree_rcu(key, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) EXPORT_SYMBOL(tcp_md5_do_del);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
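/* Release every key attached to the socket. Called on socket
 * destruction, hence the rcu_dereference_protected(..., 1).
 */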
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static void tcp_clear_md5_list(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) struct tcp_sock *tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) struct tcp_md5sig_key *key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) struct hlist_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) struct tcp_md5sig_info *md5sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) hlist_del_rcu(&key->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) kfree_rcu(key, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
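/* setsockopt(TCP_MD5SIG / TCP_MD5SIG_EXT) handler. A minimal sketch of
 * the userspace side, with placeholder address and key values:
 *
 *	struct tcp_md5sig cmd = {};
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = inet_addr("192.0.2.1");
 *	cmd.tcpm_flags = TCP_MD5SIG_FLAG_PREFIX;
 *	cmd.tcpm_prefixlen = 24;
 *	cmd.tcpm_keylen = 6;
 *	memcpy(cmd.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG_EXT, &cmd, sizeof(cmd));
 *
 * A zero tcpm_keylen deletes the matching key instead of adding one.
 */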
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) sockptr_t optval, int optlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) struct tcp_md5sig cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) const union tcp_md5_addr *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) u8 prefixlen = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) int l3index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (optlen < sizeof(cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (sin->sin_family != AF_INET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (optname == TCP_MD5SIG_EXT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) prefixlen = cmd.tcpm_prefixlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (prefixlen > 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (optname == TCP_MD5SIG_EXT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (dev && netif_is_l3_master(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) l3index = dev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
/* OK to check whether dev/l3index were set outside of the RCU
 * section; right now the device MUST be an L3 master.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (!dev || !l3index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (!cmd.tcpm_keylen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
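/* Feed the hash the RFC 2385 input prefix: an IPv4 pseudo-header
 * followed by the TCP header with its checksum field zeroed.
 */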
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) __be32 daddr, __be32 saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) const struct tcphdr *th, int nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct tcp4_pseudohdr *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct scatterlist sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) struct tcphdr *_th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) bp = hp->scratch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) bp->saddr = saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) bp->daddr = daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) bp->pad = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) bp->protocol = IPPROTO_TCP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) bp->len = cpu_to_be16(nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) _th = (struct tcphdr *)(bp + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) memcpy(_th, th, sizeof(*th));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) _th->check = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) ahash_request_set_crypt(hp->md5_req, &sg, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) sizeof(*bp) + sizeof(*th));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) return crypto_ahash_update(hp->md5_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
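/* Sign a locally built reply header (the RST/ACK paths above), where
 * there is no payload to cover. Returns 0 on success, 1 on failure
 * with the hash zeroed out.
 */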
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) __be32 daddr, __be32 saddr, const struct tcphdr *th)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) struct tcp_md5sig_pool *hp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) struct ahash_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) hp = tcp_get_md5sig_pool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (!hp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) goto clear_hash_noput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) req = hp->md5_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (crypto_ahash_init(req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) goto clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) goto clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (tcp_md5_hash_key(hp, key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) goto clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) ahash_request_set_crypt(req, NULL, md5_hash, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (crypto_ahash_final(req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) goto clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) tcp_put_md5sig_pool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) clear_hash:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) tcp_put_md5sig_pool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) clear_hash_noput:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) memset(md5_hash, 0, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
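/* Sign a full segment: pseudo-header, TCP header, payload, then the
 * key itself, per RFC 2385. Addresses come from the socket when one
 * is available, from the IP header otherwise.
 */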
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) const struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) struct tcp_md5sig_pool *hp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) struct ahash_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) const struct tcphdr *th = tcp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) __be32 saddr, daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
if (sk) { /* valid for established/request sockets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) saddr = sk->sk_rcv_saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) daddr = sk->sk_daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) const struct iphdr *iph = ip_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) saddr = iph->saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) daddr = iph->daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) hp = tcp_get_md5sig_pool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) if (!hp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) goto clear_hash_noput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) req = hp->md5_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (crypto_ahash_init(req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) goto clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) goto clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) goto clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) if (tcp_md5_hash_key(hp, key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) goto clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) ahash_request_set_crypt(req, NULL, md5_hash, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (crypto_ahash_final(req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) goto clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) tcp_put_md5sig_pool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) clear_hash:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) tcp_put_md5sig_pool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) clear_hash_noput:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) memset(md5_hash, 0, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) /* Called with rcu_read_lock() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) const struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) int dif, int sdif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) #ifdef CONFIG_TCP_MD5SIG
/*
 * This gets called for each TCP segment that arrives,
 * so we want to be efficient.
 * We have 3 drop cases:
 * o No MD5 hash and one expected.
 * o MD5 hash and we're not expecting one.
 * o MD5 hash and it's wrong.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) const __u8 *hash_location = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) struct tcp_md5sig_key *hash_expected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) const struct iphdr *iph = ip_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) const struct tcphdr *th = tcp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) const union tcp_md5_addr *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) unsigned char newhash[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) int genhash, l3index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
/* sdif set means the packet ingressed via a device
 * in an L3 domain, and dif is set to the l3mdev ifindex.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) l3index = sdif ? dif : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) addr = (union tcp_md5_addr *)&iph->saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) hash_expected = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) hash_location = tcp_parse_md5sig_option(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) /* We've parsed the options - do we have a hash? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (!hash_expected && !hash_location)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (hash_expected && !hash_location) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (!hash_expected && hash_location) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
/* Okay, we have both hash_expected and hash_location -
 * so we need to calculate the checksum.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) genhash = tcp_v4_md5_hash_skb(newhash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) hash_expected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) NULL, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (genhash || memcmp(hash_location, newhash, 16) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) &iph->saddr, ntohs(th->source),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) &iph->daddr, ntohs(th->dest),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) genhash ? " tcp_v4_calc_md5_hash failed"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) : "", l3index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
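/* Fill in the IPv4-specific part of a freshly allocated request sock
 * from the incoming SYN: the address pair and any saved IP options.
 */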
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) static void tcp_v4_init_req(struct request_sock *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) const struct sock *sk_listener,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) struct inet_request_sock *ireq = inet_rsk(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) struct net *net = sock_net(sk_listener);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) struct flowi *fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) const struct request_sock *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) return inet_csk_route_req(sk, &fl->u.ip4, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) struct request_sock_ops tcp_request_sock_ops __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) .family = PF_INET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) .obj_size = sizeof(struct tcp_request_sock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) .rtx_syn_ack = tcp_rtx_synack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) .send_ack = tcp_v4_reqsk_send_ack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) .destructor = tcp_v4_reqsk_destructor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) .send_reset = tcp_v4_send_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) .syn_ack_timeout = tcp_syn_ack_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) .mss_clamp = TCP_MSS_DEFAULT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) .req_md5_lookup = tcp_v4_md5_lookup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) .calc_md5_hash = tcp_v4_md5_hash_skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) .init_req = tcp_v4_init_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) #ifdef CONFIG_SYN_COOKIES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) .cookie_init_seq = cookie_v4_init_sequence,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) .route_req = tcp_v4_route_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) .init_seq = tcp_v4_init_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) .init_ts_off = tcp_v4_init_ts_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) .send_synack = tcp_v4_send_synack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) {
/* Never answer SYNs sent to broadcast or multicast addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) return tcp_conn_request(&tcp_request_sock_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) &tcp_request_sock_ipv4_ops, sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) tcp_listendrop(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) EXPORT_SYMBOL(tcp_v4_conn_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
/*
 * The three way handshake has completed - we got a valid
 * ACK - now create the new socket.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) struct request_sock *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) struct dst_entry *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) struct request_sock *req_unhash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) bool *own_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) struct inet_request_sock *ireq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) bool found_dup_sk = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) struct inet_sock *newinet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) struct tcp_sock *newtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) struct sock *newsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) const union tcp_md5_addr *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct tcp_md5sig_key *key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) int l3index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) struct ip_options_rcu *inet_opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if (sk_acceptq_is_full(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) goto exit_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) newsk = tcp_create_openreq_child(sk, req, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (!newsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) goto exit_nonewsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) newsk->sk_gso_type = SKB_GSO_TCPV4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) inet_sk_rx_dst_set(newsk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) newtp = tcp_sk(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) newinet = inet_sk(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) ireq = inet_rsk(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) sk_daddr_set(newsk, ireq->ir_rmt_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) newsk->sk_bound_dev_if = ireq->ir_iif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) newinet->inet_saddr = ireq->ir_loc_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) inet_opt = rcu_dereference(ireq->ireq_opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) newinet->mc_index = inet_iif(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) newinet->mc_ttl = ip_hdr(skb)->ttl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) newinet->rcv_tos = ip_hdr(skb)->tos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) inet_csk(newsk)->icsk_ext_hdr_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (inet_opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) newinet->inet_id = prandom_u32();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) /* Set ToS of the new socket based upon the value of incoming SYN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) * ECT bits are set later in tcp_init_transfer().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) if (!dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) dst = inet_csk_route_child_sock(sk, newsk, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (!dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) goto put_and_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) } else {
/* syncookie case: see end of cookie_v4_check() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) sk_setup_caps(newsk, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) tcp_ca_openreq_child(newsk, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) tcp_sync_mss(newsk, dst_mtu(dst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) tcp_initialize_rcv_mss(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) /* Copy over the MD5 key from the original socket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) addr = (union tcp_md5_addr *)&newinet->inet_daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) if (key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) * We're using one, so create a matching key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) * on the newsk structure. If we fail to get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) * memory, then we end up not copying the key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) * across. Shucks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) key->key, key->keylen, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (__inet_inherit_port(sk, newsk) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) goto put_and_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) &found_dup_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) if (likely(*own_req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) tcp_move_syn(newtp, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) ireq->ireq_opt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) newinet->inet_opt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (!req_unhash && found_dup_sk) {
			/* This code path should only be executed in the
			 * syncookie case
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) bh_unlock_sock(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) sock_put(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) newsk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) return newsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) exit_overflow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) exit_nonewsk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) dst_release(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) tcp_listendrop(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) put_and_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) newinet->inet_opt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) inet_csk_prepare_forced_close(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) tcp_done(newsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
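/* Validate a syncookie ACK arriving on a listener. Note the inverted
 * test: only non-SYN segments can complete a cookie handshake, so
 * cookie_v4_check() may hand back a freshly created child socket, the
 * unchanged listener, or NULL (the caller then discards the skb).
 */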
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) #ifdef CONFIG_SYN_COOKIES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) const struct tcphdr *th = tcp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) if (!th->syn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) sk = cookie_v4_check(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) return sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
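/* Compute a syncookie on behalf of callers outside the normal input path
 * (e.g. the BPF syncookie helper): returns the clamped MSS to advertise
 * and stores the cookie sequence number in *cookie, or returns 0 when
 * syncookies cannot be generated.
 */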
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) struct tcphdr *th, u32 *cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) u16 mss = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) #ifdef CONFIG_SYN_COOKIES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) &tcp_request_sock_ipv4_ops, sk, th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (mss) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) *cookie = __cookie_v4_init_sequence(iph, th, &mss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) tcp_synq_overflow(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) return mss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
/* The socket must have its spinlock held when we get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) * here, unless it is a TCP_LISTEN socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) * We have a potential double-lock case here, so even when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) * doing backlog processing we use the BH locking scheme.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) * This is because we cannot sleep with the original spinlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) * held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) struct sock *rsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) struct dst_entry *dst = sk->sk_rx_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) sock_rps_save_rxhash(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) sk_mark_napi_id(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) !dst->ops->check(dst, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) dst_release(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) sk->sk_rx_dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) tcp_rcv_established(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) if (tcp_checksum_complete(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) goto csum_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (sk->sk_state == TCP_LISTEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) struct sock *nsk = tcp_v4_cookie_check(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) if (!nsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) if (nsk != sk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) if (tcp_child_process(sk, nsk, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) rsk = nsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) goto reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) sock_rps_save_rxhash(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (tcp_rcv_state_process(sk, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) rsk = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) goto reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) tcp_v4_send_reset(rsk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) discard:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) /* Be careful here. If this function gets more complicated and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) * gcc suffers from register pressure on the x86, sk (in %ebx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) * might be destroyed here. This current version compiles correctly,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) * but you have been warned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) csum_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) EXPORT_SYMBOL(tcp_v4_do_rcv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
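/* Early demux: look up an established socket right at the IP layer so the
 * packet can reuse the socket's cached rx route (sk_rx_dst) and skip a
 * full routing decision. Listening sockets are intentionally not matched.
 */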
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) int tcp_v4_early_demux(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) const struct iphdr *iph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) const struct tcphdr *th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) if (skb->pkt_type != PACKET_HOST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) iph = ip_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) th = tcp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (th->doff < sizeof(struct tcphdr) / 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) iph->saddr, th->source,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) iph->daddr, ntohs(th->dest),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) skb->skb_iif, inet_sdif(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (sk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) skb->sk = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) skb->destructor = sock_edemux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (sk_fullsock(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) if (dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) dst = dst_check(dst, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (dst &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) skb_dst_set_noref(skb, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
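/* Queue an skb to the backlog of a user-owned socket, coalescing it with
 * the backlog tail when possible. Returns true if the skb was dropped;
 * in that case the socket spinlock has already been released.
 */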
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) u32 tail_gso_size, tail_gso_segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) struct skb_shared_info *shinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) const struct tcphdr *th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) struct tcphdr *thtail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) struct sk_buff *tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) unsigned int hdrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) bool fragstolen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) u32 gso_segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) u32 gso_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) int delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) * we can fix skb->truesize to its real value to avoid future drops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) * This is valid because skb is not yet charged to the socket.
	 * It has been noticed that pure SACK packets were sometimes dropped
	 * (if cooked by drivers without the copybreak feature).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) skb_condense(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) skb_dst_drop(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) if (unlikely(tcp_checksum_complete(skb))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) bh_unlock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) /* Attempt coalescing to last skb in backlog, even if we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) * above the limits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) th = (const struct tcphdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) hdrlen = th->doff * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) tail = sk->sk_backlog.tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (!tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) goto no_coalesce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) thtail = (struct tcphdr *)tail->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
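	/* Coalescing is only safe when the new skb exactly continues the
	 * tail in sequence space, carries identical DSCP/ECN bits, ECE/CWR
	 * flags and TCP options, has ACK set on both segments, and neither
	 * segment carries SYN, RST or URG.
	 */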
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) ((TCP_SKB_CB(tail)->tcp_flags |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) !((TCP_SKB_CB(tail)->tcp_flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) ((TCP_SKB_CB(tail)->tcp_flags ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) #ifdef CONFIG_TLS_DEVICE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) tail->decrypted != skb->decrypted ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) thtail->doff != th->doff ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) goto no_coalesce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) __skb_pull(skb, hdrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) shinfo = skb_shinfo(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) gso_size = shinfo->gso_size ?: skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) gso_segs = shinfo->gso_segs ?: 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) shinfo = skb_shinfo(tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) tail_gso_segs = shinfo->gso_segs ?: 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) if (likely(!before(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) thtail->window = th->window;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) /* We have to update both TCP_SKB_CB(tail)->tcp_flags and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) * thtail->fin, so that the fast path in tcp_rcv_established()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) * is not entered if we append a packet with a FIN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) * SYN, RST, URG are not present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) * ACK is set on both packets.
	 * PSH : we do not really care in the TCP stack,
	 *	 at least for 'GRO' packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) thtail->fin |= th->fin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) if (TCP_SKB_CB(skb)->has_rxtstamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) TCP_SKB_CB(tail)->has_rxtstamp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) tail->tstamp = skb->tstamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
	/* Not as strict as GRO. We only need to carry the max mss value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) shinfo->gso_size = max(gso_size, tail_gso_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) sk->sk_backlog.len += delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) __NET_INC_STATS(sock_net(sk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) LINUX_MIB_TCPBACKLOGCOALESCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) kfree_skb_partial(skb, fragstolen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) __skb_push(skb, hdrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) no_coalesce:
	/* Only the socket owner can try to collapse/prune rx queues
	 * to reduce memory overhead, so add a little headroom here.
	 * Only a few socket backlogs are likely to be non-empty
	 * concurrently.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) limit += 64*1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) if (unlikely(sk_add_backlog(sk, skb, limit))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) bh_unlock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) EXPORT_SYMBOL(tcp_add_backlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)
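/* Run the attached socket filter on the skb; a filter that shrinks the
 * packet can trim it no further than the TCP header itself.
 */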
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) int tcp_filter(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) struct tcphdr *th = (struct tcphdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) return sk_filter_trim_cap(sk, skb, th->doff * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) EXPORT_SYMBOL(tcp_filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
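/* Undo tcp_v4_fill_cb(): move the IP control block back into place so the
 * skb can go through another socket lookup or be handed to a new socket.
 */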
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) static void tcp_v4_restore_cb(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) sizeof(struct inet_skb_parm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) const struct tcphdr *th)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) {
	/* This is tricky : we move IPCB to its correct location into TCP_SKB_CB().
	 * barrier() makes sure the compiler won't play fool^Waliasing games.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) sizeof(struct inet_skb_parm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) TCP_SKB_CB(skb)->seq = ntohl(th->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) skb->len - th->doff * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) TCP_SKB_CB(skb)->tcp_tw_isn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) TCP_SKB_CB(skb)->sacked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) TCP_SKB_CB(skb)->has_rxtstamp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) * From tcp_input.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
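/* Main IPv4 receive entry point, called from the IP layer for every TCP
 * segment: validate header and checksum, find the owning socket, then
 * either process the segment, queue it to the backlog, or answer with a
 * RST when no socket matches.
 */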
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) int tcp_v4_rcv(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) struct net *net = dev_net(skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) struct sk_buff *skb_to_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) int sdif = inet_sdif(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) int dif = inet_iif(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) const struct iphdr *iph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) const struct tcphdr *th;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) bool refcounted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (skb->pkt_type != PACKET_HOST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) goto discard_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) /* Count it even if it's bad */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) __TCP_INC_STATS(net, TCP_MIB_INSEGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) goto discard_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) th = (const struct tcphdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) goto bad_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) if (!pskb_may_pull(skb, th->doff * 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) goto discard_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) /* An explanation is required here, I think.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) * Packet length and doff are validated by header prediction,
	 * provided the case of th->doff == 0 is eliminated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) * So, we defer the checks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) goto csum_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) th = (const struct tcphdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) iph = ip_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) lookup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) th->dest, sdif, &refcounted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) if (!sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) goto no_tcp_socket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) process:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) if (sk->sk_state == TCP_TIME_WAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) goto do_time_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (sk->sk_state == TCP_NEW_SYN_RECV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) struct request_sock *req = inet_reqsk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) bool req_stolen = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) struct sock *nsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) sk = req->rsk_listener;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) sk_drops_add(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) reqsk_put(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) goto discard_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) if (tcp_checksum_complete(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) reqsk_put(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) goto csum_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) if (unlikely(sk->sk_state != TCP_LISTEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) inet_csk_reqsk_queue_drop_and_put(sk, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) goto lookup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) /* We own a reference on the listener, increase it again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) * as we might lose it too soon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) sock_hold(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) refcounted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) nsk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) if (!tcp_filter(sk, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) th = (const struct tcphdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) iph = ip_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) tcp_v4_fill_cb(skb, iph, th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (!nsk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) reqsk_put(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) if (req_stolen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) /* Another cpu got exclusive access to req
			 * and created a full-blown socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) * Try to feed this packet to this socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) * instead of discarding it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) tcp_v4_restore_cb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) goto lookup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) goto discard_and_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (nsk == sk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) reqsk_put(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) tcp_v4_restore_cb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) } else if (tcp_child_process(sk, nsk, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) tcp_v4_send_reset(nsk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) goto discard_and_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) goto discard_and_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) goto discard_and_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) goto discard_and_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) nf_reset_ct(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) if (tcp_filter(sk, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) goto discard_and_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) th = (const struct tcphdr *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) iph = ip_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) tcp_v4_fill_cb(skb, iph, th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) skb->dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) if (sk->sk_state == TCP_LISTEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) ret = tcp_v4_do_rcv(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) goto put_and_return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) sk_incoming_cpu_update(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) bh_lock_sock_nested(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) tcp_segs_in(tcp_sk(sk), skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) if (!sock_owned_by_user(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) skb_to_free = sk->sk_rx_skb_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) sk->sk_rx_skb_cache = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) ret = tcp_v4_do_rcv(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) if (tcp_add_backlog(sk, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) goto discard_and_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) skb_to_free = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) bh_unlock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) if (skb_to_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) __kfree_skb(skb_to_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) put_and_return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) if (refcounted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) no_tcp_socket:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) goto discard_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) tcp_v4_fill_cb(skb, iph, th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) if (tcp_checksum_complete(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) csum_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) bad_packet:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) __TCP_INC_STATS(net, TCP_MIB_INERRS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) tcp_v4_send_reset(NULL, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) discard_it:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) /* Discard frame. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) discard_and_relse:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) sk_drops_add(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) if (refcounted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) sock_put(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) goto discard_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) do_time_wait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) inet_twsk_put(inet_twsk(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) goto discard_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) tcp_v4_fill_cb(skb, iph, th);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (tcp_checksum_complete(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) inet_twsk_put(inet_twsk(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) goto csum_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) case TCP_TW_SYN: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) &tcp_hashinfo, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) __tcp_hdrlen(th),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) iph->saddr, th->source,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) iph->daddr, th->dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) inet_iif(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) sdif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) if (sk2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) inet_twsk_deschedule_put(inet_twsk(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) sk = sk2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) tcp_v4_restore_cb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) refcounted = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) goto process;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) /* to ACK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) case TCP_TW_ACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) tcp_v4_timewait_ack(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) case TCP_TW_RST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) tcp_v4_send_reset(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) inet_twsk_deschedule_put(inet_twsk(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) goto discard_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) case TCP_TW_SUCCESS:;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) goto discard_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) static struct timewait_sock_ops tcp_timewait_sock_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) .twsk_obj_size = sizeof(struct tcp_timewait_sock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) .twsk_unique = tcp_twsk_unique,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) .twsk_destructor= tcp_twsk_destructor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
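/* Cache the incoming route in the socket so the established fast path and
 * early demux can reuse it for subsequent packets from the same path.
 */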
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) struct dst_entry *dst = skb_dst(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) if (dst && dst_hold_safe(dst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) sk->sk_rx_dst = dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) EXPORT_SYMBOL(inet_sk_rx_dst_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) const struct inet_connection_sock_af_ops ipv4_specific = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) .queue_xmit = ip_queue_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) .send_check = tcp_v4_send_check,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) .rebuild_header = inet_sk_rebuild_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) .sk_rx_dst_set = inet_sk_rx_dst_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) .conn_request = tcp_v4_conn_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) .syn_recv_sock = tcp_v4_syn_recv_sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) .net_header_len = sizeof(struct iphdr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) .setsockopt = ip_setsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) .getsockopt = ip_getsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) .addr2sockaddr = inet_csk_addr2sockaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) .sockaddr_len = sizeof(struct sockaddr_in),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) .mtu_reduced = tcp_v4_mtu_reduced,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) EXPORT_SYMBOL(ipv4_specific);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) .md5_lookup = tcp_v4_md5_lookup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) .calc_md5_hash = tcp_v4_md5_hash_skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) .md5_parse = tcp_v4_parse_md5_keys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) static int tcp_v4_init_sock(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) struct inet_connection_sock *icsk = inet_csk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) tcp_init_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) icsk->icsk_af_ops = &ipv4_specific;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)
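/* Final cleanup of a TCP socket: stop timers, release congestion control
 * and ULP state, purge queues, and free MD5/Fast Open resources.
 */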
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) void tcp_v4_destroy_sock(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) struct tcp_sock *tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) trace_tcp_destroy_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) tcp_clear_xmit_timers(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) tcp_cleanup_congestion_control(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) tcp_cleanup_ulp(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
	/* Clean up the write buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) tcp_write_queue_purge(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) /* Check if we want to disable active TFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) tcp_fastopen_active_disable_ofo_check(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) /* Cleans up our, hopefully empty, out_of_order_queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) skb_rbtree_purge(&tp->out_of_order_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) #ifdef CONFIG_TCP_MD5SIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) /* Clean up the MD5 key list, if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) if (tp->md5sig_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) tcp_clear_md5_list(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) tp->md5sig_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) /* Clean up a referenced TCP bind bucket. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) if (inet_csk(sk)->icsk_bind_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) inet_put_port(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) BUG_ON(rcu_access_pointer(tp->fastopen_rsk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) /* If socket is aborted during connect operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) tcp_free_fastopen_req(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) tcp_fastopen_destroy_cipher(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) tcp_saved_syn_free(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) sk_sockets_allocated_dec(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) EXPORT_SYMBOL(tcp_v4_destroy_sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) /* Proc filesystem TCP sock list dumping. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
/*
 * Get the next listener socket following cur. If cur is NULL, get the first
 * socket starting from the bucket given in st->bucket; when st->bucket is
 * zero the very first socket in the hash table is returned.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) static void *listening_get_next(struct seq_file *seq, void *cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) struct tcp_seq_afinfo *afinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) struct tcp_iter_state *st = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) struct net *net = seq_file_net(seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) struct inet_listen_hashbucket *ilb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) struct hlist_nulls_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) struct sock *sk = cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) if (st->bpf_seq_afinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) afinfo = st->bpf_seq_afinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) afinfo = PDE_DATA(file_inode(seq->file));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) if (!sk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) get_head:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) ilb = &tcp_hashinfo.listening_hash[st->bucket];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) spin_lock(&ilb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) sk = sk_nulls_head(&ilb->nulls_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) st->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) goto get_sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) ilb = &tcp_hashinfo.listening_hash[st->bucket];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) ++st->num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) ++st->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) sk = sk_nulls_next(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) get_sk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) sk_nulls_for_each_from(sk, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) if (!net_eq(sock_net(sk), net))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) if (afinfo->family == AF_UNSPEC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) sk->sk_family == afinfo->family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) return sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) spin_unlock(&ilb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) st->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) if (++st->bucket < INET_LHTABLE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) goto get_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305)
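/* Return the listening socket at offset *pos, walking from the first
 * bucket; *pos is decremented once per step actually taken.
 */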
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) struct tcp_iter_state *st = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) void *rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) st->bucket = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) st->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) rc = listening_get_next(seq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) while (rc && *pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) rc = listening_get_next(seq, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) --*pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) static inline bool empty_bucket(const struct tcp_iter_state *st)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
/*
 * Get the first established socket, starting from the bucket given in
 * st->bucket. If st->bucket is zero, the very first socket in the hash
 * is returned.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) static void *established_get_first(struct seq_file *seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) struct tcp_seq_afinfo *afinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) struct tcp_iter_state *st = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) struct net *net = seq_file_net(seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) void *rc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) if (st->bpf_seq_afinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) afinfo = st->bpf_seq_afinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) afinfo = PDE_DATA(file_inode(seq->file));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) st->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) struct hlist_nulls_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) /* Lockless fast path for the common case of empty buckets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) if (empty_bucket(st))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) spin_lock_bh(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) if ((afinfo->family != AF_UNSPEC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) sk->sk_family != afinfo->family) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) !net_eq(sock_net(sk), net)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) rc = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) spin_unlock_bh(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)
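/* Get the established socket following cur, releasing the current ehash
 * bucket lock and moving on to the next non-empty bucket when the chain
 * is exhausted.
 */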
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) static void *established_get_next(struct seq_file *seq, void *cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) struct tcp_seq_afinfo *afinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) struct sock *sk = cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) struct hlist_nulls_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) struct tcp_iter_state *st = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) struct net *net = seq_file_net(seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) if (st->bpf_seq_afinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) afinfo = st->bpf_seq_afinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) afinfo = PDE_DATA(file_inode(seq->file));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) ++st->num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) ++st->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) sk = sk_nulls_next(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) sk_nulls_for_each_from(sk, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) if ((afinfo->family == AF_UNSPEC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) sk->sk_family == afinfo->family) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) net_eq(sock_net(sk), net))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) return sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) ++st->bucket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) return established_get_first(seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
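/* Return the established socket @pos entries past the first one, or
 * NULL when the walk runs off the end of the table.
 */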
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) static void *established_get_idx(struct seq_file *seq, loff_t pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) struct tcp_iter_state *st = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) void *rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) st->bucket = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) rc = established_get_first(seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) while (rc && pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) rc = established_get_next(seq, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) --pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)
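/* Position the iterator at absolute position @pos: the listening hash
 * table is consumed first, and any remainder of @pos is spent on the
 * established hash table.
 */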
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) void *rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) struct tcp_iter_state *st = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) st->state = TCP_SEQ_STATE_LISTENING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) rc = listening_get_idx(seq, &pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) st->state = TCP_SEQ_STATE_ESTABLISHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) rc = established_get_idx(seq, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
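/* Fast-path resume for sequential readers: rather than replaying the
 * whole walk from position 0, re-enter the bucket saved in st->bucket
 * and skip st->offset entries inside it. st->num is restored so the
 * "sl" numbering stays continuous across reads.
 */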
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) static void *tcp_seek_last_pos(struct seq_file *seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) struct tcp_iter_state *st = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) int bucket = st->bucket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) int offset = st->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) int orig_num = st->num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) void *rc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) switch (st->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) case TCP_SEQ_STATE_LISTENING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) if (st->bucket >= INET_LHTABLE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) rc = listening_get_next(seq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) while (offset-- && rc && bucket == st->bucket)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) rc = listening_get_next(seq, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) st->bucket = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) st->state = TCP_SEQ_STATE_ESTABLISHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) case TCP_SEQ_STATE_ESTABLISHED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) if (st->bucket > tcp_hashinfo.ehash_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) rc = established_get_first(seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) while (offset-- && rc && bucket == st->bucket)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) rc = established_get_next(seq, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) st->num = orig_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463)
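/* seq_file ->start(): reuse the saved bucket/offset when *pos is
 * exactly where the previous read stopped; otherwise rewind and walk
 * from scratch (*pos == 0 yields SEQ_START_TOKEN).
 */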
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) struct tcp_iter_state *st = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) void *rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) if (*pos && *pos == st->last_pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) rc = tcp_seek_last_pos(seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) st->state = TCP_SEQ_STATE_LISTENING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) st->num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) st->bucket = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) st->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) st->last_pos = *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) EXPORT_SYMBOL(tcp_seq_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)
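/* seq_file ->next(): advance one entry, falling through from the
 * listening hash table to the established one when the former is
 * exhausted.
 */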
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) struct tcp_iter_state *st = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) void *rc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) if (v == SEQ_START_TOKEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) rc = tcp_get_idx(seq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) switch (st->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) case TCP_SEQ_STATE_LISTENING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) rc = listening_get_next(seq, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) st->state = TCP_SEQ_STATE_ESTABLISHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) st->bucket = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) st->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) rc = established_get_first(seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) case TCP_SEQ_STATE_ESTABLISHED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) rc = established_get_next(seq, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) ++*pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) st->last_pos = *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) EXPORT_SYMBOL(tcp_seq_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517)
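/* seq_file ->stop(): release whichever bucket lock the walk left
 * held; @v tells us whether we are actually holding one.
 */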
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) void tcp_seq_stop(struct seq_file *seq, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) struct tcp_iter_state *st = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) switch (st->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) case TCP_SEQ_STATE_LISTENING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) if (v != SEQ_START_TOKEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) case TCP_SEQ_STATE_ESTABLISHED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) if (v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) EXPORT_SYMBOL(tcp_seq_stop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)
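/* Format one SYN_RECV request socket in /proc/net/tcp layout. The
 * queue and inode columns are fixed at zero because a request_sock
 * has neither; the uid is taken from the listener.
 */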
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) static void get_openreq4(const struct request_sock *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) struct seq_file *f, int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) const struct inet_request_sock *ireq = inet_rsk(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) long delta = req->rsk_timer.expires - jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) seq_printf(f, "%4d: %08X:%04X %08X:%04X"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) ireq->ir_loc_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) ireq->ir_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) ireq->ir_rmt_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) ntohs(ireq->ir_rmt_port),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af-dependent. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 1, /* timers active (only the expire timer) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) jiffies_delta_to_clock_t(delta),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) req->num_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) from_kuid_munged(seq_user_ns(f),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) sock_i_uid(req->rsk_listener)),
		0, /* non-standard timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 0, /* open_requests have no inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560)
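/* Format a full socket. The "tr" timer column encodes which timer is
 * pending: 1 retransmit/loss-probe, 4 zero-window probe, 2 sk_timer
 * (keepalive), 0 none; TIME_WAIT entries use 3, see
 * get_timewait4_sock().
 */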
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) int timer_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) unsigned long timer_expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) const struct tcp_sock *tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) const struct inet_connection_sock *icsk = inet_csk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) const struct inet_sock *inet = inet_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) __be32 dest = inet->inet_daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) __be32 src = inet->inet_rcv_saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) __u16 destp = ntohs(inet->inet_dport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) __u16 srcp = ntohs(inet->inet_sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) int rx_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) int state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) timer_active = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) timer_expires = icsk->icsk_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) timer_active = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) timer_expires = icsk->icsk_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) } else if (timer_pending(&sk->sk_timer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) timer_active = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) timer_expires = sk->sk_timer.expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) timer_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) timer_expires = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) state = inet_sk_state_load(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) if (state == TCP_LISTEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) rx_queue = READ_ONCE(sk->sk_ack_backlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) /* Because we don't lock the socket,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) * we might find a transient negative value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) READ_ONCE(tp->copied_seq), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) i, src, srcp, dest, destp, state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) READ_ONCE(tp->write_seq) - tp->snd_una,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) rx_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) timer_active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) jiffies_delta_to_clock_t(timer_expires - jiffies),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) icsk->icsk_retransmits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) icsk->icsk_probes_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) sock_i_ino(sk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) refcount_read(&sk->sk_refcnt), sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) jiffies_to_clock_t(icsk->icsk_rto),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) jiffies_to_clock_t(icsk->icsk_ack.ato),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) tp->snd_cwnd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) state == TCP_LISTEN ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) fastopenq->max_qlen :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
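/* Format a TIME_WAIT socket: queues, uid and inode are fixed at zero,
 * the timer class is 3 and the remaining lifetime comes from tw_timer.
 */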
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) static void get_timewait4_sock(const struct inet_timewait_sock *tw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) struct seq_file *f, int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) long delta = tw->tw_timer.expires - jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) __be32 dest, src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) __u16 destp, srcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) dest = tw->tw_daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) src = tw->tw_rcv_saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) destp = ntohs(tw->tw_dport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) srcp = ntohs(tw->tw_sport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) seq_printf(f, "%4d: %08X:%04X %08X:%04X"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) refcount_read(&tw->tw_refcnt), tw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
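/* Each record is padded to a fixed width (TMPSZ - 1 characters plus
 * the trailing '\n') via seq_setwidth()/seq_pad(), so userspace can
 * seek /proc/net/tcp by record. E.g. "cat /proc/net/tcp" prints the
 * header followed by one fixed-width line per socket.
 */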
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) #define TMPSZ 150
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) static int tcp4_seq_show(struct seq_file *seq, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) struct tcp_iter_state *st;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) struct sock *sk = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) seq_setwidth(seq, TMPSZ - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) if (v == SEQ_START_TOKEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) seq_puts(seq, " sl local_address rem_address st tx_queue "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) "rx_queue tr tm->when retrnsmt uid timeout "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) "inode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) st = seq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) if (sk->sk_state == TCP_TIME_WAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) get_timewait4_sock(v, seq, st->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) else if (sk->sk_state == TCP_NEW_SYN_RECV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) get_openreq4(v, seq, st->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) get_tcp4_sock(v, seq, st->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) seq_pad(seq, '\n');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) #ifdef CONFIG_BPF_SYSCALL
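/* Context handed to a BPF "iter/tcp" program for each socket visited.
 *
 * A minimal consumer sketch (illustrative only; assumes libbpf's
 * vmlinux.h plus bpf_helpers.h/bpf_tracing.h for SEC() and
 * BPF_SEQ_PRINTF()):
 *
 *	SEC("iter/tcp")
 *	int dump_tcp(struct bpf_iter__tcp *ctx)
 *	{
 *		struct sock_common *skc = ctx->sk_common;
 *
 *		if (!skc)	// may be NULL, see PTR_TO_BTF_ID_OR_NULL below
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "family %u state %u\n",
 *			       skc->skc_family, skc->skc_state);
 *		return 0;
 *	}
 */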
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) struct bpf_iter__tcp {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) __bpf_md_ptr(struct bpf_iter_meta *, meta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) __bpf_md_ptr(struct sock_common *, sk_common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) uid_t uid __aligned(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) struct sock_common *sk_common, uid_t uid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) struct bpf_iter__tcp ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) meta->seq_num--; /* skip SEQ_START_TOKEN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) ctx.meta = meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) ctx.sk_common = sk_common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) ctx.uid = uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) return bpf_iter_run_prog(prog, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) struct bpf_iter_meta meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) struct sock *sk = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) uid_t uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) if (v == SEQ_START_TOKEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) if (sk->sk_state == TCP_TIME_WAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) uid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) } else if (sk->sk_state == TCP_NEW_SYN_RECV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) const struct request_sock *req = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) uid = from_kuid_munged(seq_user_ns(seq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) sock_i_uid(req->rsk_listener));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) meta.seq = seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) prog = bpf_iter_get_info(&meta, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) return tcp_prog_seq_show(prog, &meta, v, uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) struct bpf_iter_meta meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) if (!v) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) meta.seq = seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) prog = bpf_iter_get_info(&meta, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) if (prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) (void)tcp_prog_seq_show(prog, &meta, v, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) tcp_seq_stop(seq, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) static const struct seq_operations bpf_iter_tcp_seq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) .show = bpf_iter_tcp_seq_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) .start = tcp_seq_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) .next = tcp_seq_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) .stop = bpf_iter_tcp_seq_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) static const struct seq_operations tcp4_seq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) .show = tcp4_seq_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) .start = tcp_seq_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) .next = tcp_seq_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) .stop = tcp_seq_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) static struct tcp_seq_afinfo tcp4_seq_afinfo = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) .family = AF_INET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) static int __net_init tcp4_proc_init_net(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) static void __net_exit tcp4_proc_exit_net(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) remove_proc_entry("tcp", net->proc_net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) static struct pernet_operations tcp4_net_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) .init = tcp4_proc_init_net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) .exit = tcp4_proc_exit_net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) int __init tcp4_proc_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) return register_pernet_subsys(&tcp4_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) void tcp4_proc_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) unregister_pernet_subsys(&tcp4_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) #endif /* CONFIG_PROC_FS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)
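/* The IPv4 TCP protocol descriptor: the glue between the generic
 * socket layer and the TCP entry points implemented in this file and
 * the rest of net/ipv4/tcp*.c.
 */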
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) struct proto tcp_prot = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) .name = "TCP",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) .close = tcp_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) .pre_connect = tcp_v4_pre_connect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) .connect = tcp_v4_connect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) .disconnect = tcp_disconnect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) .accept = inet_csk_accept,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) .ioctl = tcp_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) .init = tcp_v4_init_sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) .destroy = tcp_v4_destroy_sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) .shutdown = tcp_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) .setsockopt = tcp_setsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) .getsockopt = tcp_getsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) .keepalive = tcp_set_keepalive,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) .recvmsg = tcp_recvmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) .sendmsg = tcp_sendmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) .sendpage = tcp_sendpage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) .backlog_rcv = tcp_v4_do_rcv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) .release_cb = tcp_release_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) .hash = inet_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) .unhash = inet_unhash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) .get_port = inet_csk_get_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) .enter_memory_pressure = tcp_enter_memory_pressure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) .leave_memory_pressure = tcp_leave_memory_pressure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) .stream_memory_free = tcp_stream_memory_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) .sockets_allocated = &tcp_sockets_allocated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) .orphan_count = &tcp_orphan_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) .memory_allocated = &tcp_memory_allocated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) .memory_pressure = &tcp_memory_pressure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) .sysctl_mem = sysctl_tcp_mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) .max_header = MAX_TCP_HEADER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) .obj_size = sizeof(struct tcp_sock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) .slab_flags = SLAB_TYPESAFE_BY_RCU,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) .twsk_prot = &tcp_timewait_sock_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) .rsk_prot = &tcp_request_sock_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) .h.hashinfo = &tcp_hashinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) .no_autobind = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) .diag_destroy = tcp_abort,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) EXPORT_SYMBOL(tcp_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820)
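/* Per-netns teardown: drop the namespace's reference on its congestion
 * control module and destroy the per-CPU control sockets.
 */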
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) static void __net_exit tcp_sk_exit(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) if (net->ipv4.tcp_congestion_control)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) bpf_module_put(net->ipv4.tcp_congestion_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) net->ipv4.tcp_congestion_control->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) free_percpu(net->ipv4.tcp_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833)
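/* Per-netns setup: create one control socket per possible CPU (used
 * to send RSTs and ACKs that have no local socket) and seed every
 * per-netns TCP sysctl with its default.
 */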
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) static int __net_init tcp_sk_init(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) int res, cpu, cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) net->ipv4.tcp_sk = alloc_percpu(struct sock *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) if (!net->ipv4.tcp_sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) IPPROTO_TCP, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850)
		/* Enforce IP_DF and IPID==0 on the RSTs and ACKs sent on
		 * behalf of sockets in SYN_RECV and TIME_WAIT state.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) net->ipv4.sysctl_tcp_ecn = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) net->ipv4.sysctl_tcp_ecn_fallback = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) net->ipv4.sysctl_tcp_syncookies = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) net->ipv4.sysctl_tcp_orphan_retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) net->ipv4.sysctl_tcp_tw_reuse = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) cnt = tcp_hashinfo.ehash_mask + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) net->ipv4.sysctl_tcp_sack = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) net->ipv4.sysctl_tcp_window_scaling = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) net->ipv4.sysctl_tcp_timestamps = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) net->ipv4.sysctl_tcp_early_retrans = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC 2861 behavior. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) net->ipv4.sysctl_tcp_retrans_collapse = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) net->ipv4.sysctl_tcp_max_reordering = 300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) net->ipv4.sysctl_tcp_dsack = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) net->ipv4.sysctl_tcp_app_win = 31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) net->ipv4.sysctl_tcp_adv_win_scale = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) net->ipv4.sysctl_tcp_frto = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) /* This limits the percentage of the congestion window which we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) * will allow a single TSO frame to consume. Building TSO frames
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) * which are too large can cause TCP streams to be bursty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) net->ipv4.sysctl_tcp_tso_win_divisor = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) /* Default TSQ limit of 16 TSO segments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
	/* RFC 5961 challenge ACK rate limiting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) net->ipv4.sysctl_tcp_min_tso_segs = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) net->ipv4.sysctl_tcp_autocorking = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) if (net != &init_net) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) memcpy(net->ipv4.sysctl_tcp_rmem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) init_net.ipv4.sysctl_tcp_rmem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) sizeof(init_net.ipv4.sysctl_tcp_rmem));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) memcpy(net->ipv4.sysctl_tcp_wmem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) init_net.ipv4.sysctl_tcp_wmem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) sizeof(init_net.ipv4.sysctl_tcp_wmem));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) net->ipv4.sysctl_tcp_comp_sack_nr = 44;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) atomic_set(&net->ipv4.tfo_active_disable_times, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)
	/* Reno is always built in, so it is the safe fallback when we
	 * cannot take a reference on init_net's congestion control.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) if (!net_eq(net, &init_net) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) bpf_try_module_get(init_net.ipv4.tcp_congestion_control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) init_net.ipv4.tcp_congestion_control->owner))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) net->ipv4.tcp_congestion_control = &tcp_reno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) tcp_sk_exit(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947)
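/* Batched netns exit: purge TIME_WAIT sockets once for the whole
 * batch, then free each namespace's TCP fastopen context.
 */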
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) struct net *net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) inet_twsk_purge(&tcp_hashinfo, AF_INET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) list_for_each_entry(net, net_exit_list, exit_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) tcp_fastopen_ctx_destroy(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) static struct pernet_operations __net_initdata tcp_sk_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) .init = tcp_sk_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) .exit = tcp_sk_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) .exit_batch = tcp_sk_exit_batch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) struct sock_common *sk_common, uid_t uid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) struct tcp_iter_state *st = priv_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) struct tcp_seq_afinfo *afinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) if (!afinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) afinfo->family = AF_UNSPEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) st->bpf_seq_afinfo = afinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) ret = bpf_iter_init_seq_net(priv_data, aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) kfree(afinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) static void bpf_iter_fini_tcp(void *priv_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) struct tcp_iter_state *st = priv_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) kfree(st->bpf_seq_afinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) bpf_iter_fini_seq_net(priv_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) static const struct bpf_iter_seq_info tcp_seq_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) .seq_ops = &bpf_iter_tcp_seq_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) .init_seq_private = bpf_iter_init_tcp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) .fini_seq_private = bpf_iter_fini_tcp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) .seq_priv_size = sizeof(struct tcp_iter_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) static struct bpf_iter_reg tcp_reg_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) .target = "tcp",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) .ctx_arg_info_size = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) .ctx_arg_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) { offsetof(struct bpf_iter__tcp, sk_common),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) PTR_TO_BTF_ID_OR_NULL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) .seq_info = &tcp_seq_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) static void __init bpf_iter_register(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) tcp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) if (bpf_iter_reg_target(&tcp_reg_info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) pr_warn("Warning: could not register bpf iterator tcp\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019)
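/* Boot-time initialization, called from inet_init(); failure is fatal
 * because TCP cannot run without its per-netns control sockets.
 */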
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) void __init tcp_v4_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) if (register_pernet_subsys(&tcp_sk_ops))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) panic("Failed to create the TCP control socket.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) bpf_iter_register();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) }