// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/static_key.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
        if (seq == s_win)
                return true;
        if (after(end_seq, s_win) && before(seq, e_win))
                return true;
        return seq == e_win && seq == end_seq;
}
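
/* A worked illustration of the window check above (the numbers are made up).
 * The sequence-space helpers are, per include/net/tcp.h,
 *
 *      before(a, b) := (s32)(a - b) < 0        after(a, b) := before(b, a)
 *
 * so the comparison is done modulo 2^32 and keeps working across a wrap.
 * With a window [s_win, e_win) = [0xFFFFFF00, 0x00000100) straddling the
 * wrap point, a segment with seq = 0xFFFFFFF0, end_seq = 0x00000010 is
 * accepted: after(0x00000010, 0xFFFFFF00) and before(0xFFFFFFF0, 0x00000100)
 * both hold because the (s32) differences are positive resp. negative.
 * The seq == s_win and seq == end_seq == e_win special cases accept bare,
 * zero-length segments sitting exactly on a window edge (e.g. a probe
 * against a zero window).
 */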

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
                                  const struct sk_buff *skb, int mib_idx)
{
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

        if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
                                  &tcptw->tw_last_oow_ack_time)) {
                /* Send ACK. Note that we do not put the bucket;
                 * it will be released by the caller.
                 */
                return TCP_TW_ACK;
        }

        /* We are rate-limiting, so just release the tw sock and drop skb. */
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
}
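
/* The rate limiting above is what keeps a TIME-WAIT bucket from being turned
 * into an ACK amplifier: tcp_oow_rate_limited() (in tcp_input.c) compares the
 * per-socket tw_last_oow_ack_time stamp against the interval configured via
 * the net.ipv4.tcp_invalid_ratelimit sysctl, so at most one ACK per interval
 * is sent in response to out-of-window segments, while the matching
 * LINUX_MIB_TCPACKSKIPPED* counter accounts for the suppressed ones.
 */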

/*
 * * Main purpose of TIME-WAIT state is to close the connection gracefully,
 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow the loss of one
 *   (or more) segments sent by the peer and of our ACKs. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 * When you compare it to RFCs, please, read section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                           const struct tcphdr *th)
{
        struct tcp_options_received tmp_opt;
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
        bool paws_reject = false;

        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
                tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

                if (tmp_opt.saw_tstamp) {
                        if (tmp_opt.rcv_tsecr)
                                tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
                        tmp_opt.ts_recent       = tcptw->tw_ts_recent;
                        tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
                }
        }

        if (tw->tw_substate == TCP_FIN_WAIT2) {
                /* Just repeat all the checks of tcp_rcv_state_process() */

                /* Out of window, send ACK */
                if (paws_reject ||
                    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                   tcptw->tw_rcv_nxt,
                                   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
                        return tcp_timewait_check_oow_rate_limit(
                                tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

                if (th->rst)
                        goto kill;

                if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
                        return TCP_TW_RST;

                /* Dup ACK? */
                if (!th->ack ||
                    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
                    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
                        inet_twsk_put(tw);
                        return TCP_TW_SUCCESS;
                }

                /* New data or FIN. If new data arrive after half-duplex close,
                 * reset.
                 */
                if (!th->fin ||
                    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
                        return TCP_TW_RST;

                /* FIN arrived, enter true time-wait state. */
                tw->tw_substate   = TCP_TIME_WAIT;
                tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent_stamp = ktime_get_seconds();
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
                }

                inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                return TCP_TW_ACK;
        }
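
        /* To make the FIN_WAIT2 branch above concrete (example values only):
         * with tw_rcv_nxt == 1001, a retransmitted bare FIN arrives as
         * seq == 1001, end_seq == 1002 (the FIN itself occupies one sequence
         * number). It is neither a dup ACK nor new data, end_seq equals
         * tw_rcv_nxt + 1, so we advance tw_rcv_nxt to 1002, flip the substate
         * to real TIME_WAIT, re-arm the timer for TCP_TIMEWAIT_LEN and ACK it.
         * Had the segment carried fresh payload instead (end_seq > 1002),
         * the half-duplex-close rule above would have answered with a reset.
         */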

        /*
         *      Now real TIME-WAIT state.
         *
         *      RFC 1122:
         *      "When a connection is [...] on TIME-WAIT state [...]
         *      [a TCP] MAY accept a new SYN from the remote TCP to
         *      reopen the connection directly, if it:
         *
         *      (1)  assigns its initial sequence number for the new
         *      connection to be larger than the largest sequence
         *      number it used on the previous connection incarnation,
         *      and
         *
         *      (2)  returns to TIME-WAIT state if the SYN turns out
         *      to be an old duplicate".
         */

        if (!paws_reject &&
            (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
             (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
                /* An in-window segment may only be a reset or a bare ACK. */

                if (th->rst) {
                        /* This is TIME_WAIT assassination, in two flavors.
                         * Oh well... nobody has a sufficient solution to this
                         * protocol bug yet.
                         */
                        if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
kill:
                                inet_twsk_deschedule_put(tw);
                                return TCP_TW_SUCCESS;
                        }
                } else {
                        inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                }

                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
                        tcptw->tw_ts_recent_stamp = ktime_get_seconds();
                }

                inet_twsk_put(tw);
                return TCP_TW_SUCCESS;
        }

        /* Out of window segment.
         *
         * All the segments are ACKed immediately.
         *
         * The only exception is a new SYN. We accept it, if it is
         * not an old duplicate and we are not in danger of being killed
         * by delayed old duplicates. The RFC check, that it carries a
         * newer sequence number, works at rates <40Mbit/sec.
         * However, if PAWS works, it is reliable and, even more,
         * we may relax the silly seq space cutoff.
         *
         * RED-PEN: we violate the main RFC requirement: if this SYN turns
         * out to be an old duplicate (i.e. we receive an RST in reply to
         * our SYN-ACK), we must return the socket to time-wait state.
         * It is not good, but not fatal yet.
         */

        if (th->syn && !th->rst && !th->ack && !paws_reject &&
            (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
             (tmp_opt.saw_tstamp &&
              (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
                u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

                if (isn == 0)
                        isn++;
                TCP_SKB_CB(skb)->tcp_tw_isn = isn;
                return TCP_TW_SYN;
        }
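
        /* Worked example for the ISN choice above (numbers are illustrative):
         * if the old incarnation left off at tw_snd_nxt == 4000000, the new
         * SYN is handed tcp_tw_isn == 4000000 + 65535 + 2 == 4065537, i.e.
         * safely beyond anything the old connection could still have in
         * flight within a 64K window, which is how condition (1) of the
         * RFC 1122 quote above is satisfied. The bump from 0 to 1 exists
         * because a zero tcp_tw_isn is treated downstream as "no TIME-WAIT
         * hint, generate a fresh ISN".
         */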

        if (paws_reject)
                __NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

        if (!th->rst) {
                /* In this case we must reset the TIMEWAIT timer.
                 *
                 * If it is an ACKless SYN it may be either an old duplicate
                 * or a new, valid SYN with a random sequence number < rcv_nxt.
                 * Do not reschedule in the latter case.
                 */
                if (paws_reject || th->ack)
                        inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

                return tcp_timewait_check_oow_rate_limit(
                        tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
        }
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
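
/* For orientation, a rough sketch (not a verbatim copy) of how the IPv4
 * receive path in tcp_ipv4.c acts on the status returned above:
 *
 *      switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
 *      case TCP_TW_SYN:        // acceptable reopen: look up a listener and
 *                              // process the SYN as a fresh connection request
 *      case TCP_TW_ACK:        // answer with a TIME-WAIT ACK
 *      case TCP_TW_RST:        // answer with a reset and kill the bucket
 *      case TCP_TW_SUCCESS:    // nothing more to do, just drop the skb
 *      }
 *
 * which is why this function only decides and never transmits by itself.
 */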

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct tcp_sock *tp = tcp_sk(sk);
        struct inet_timewait_sock *tw;
        struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

        tw = inet_twsk_alloc(sk, tcp_death_row, state);

        if (tw) {
                struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
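                /* (Illustrative arithmetic:) the expression above is
                 * 4*RTO - RTO/2 == 3.5 * RTO, so with icsk_rto at, say,
                 * 200 ms the floor applied to the timeout below is 700 ms.
                 */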
                struct inet_sock *inet = inet_sk(sk);

                tw->tw_transparent      = inet->transparent;
                tw->tw_mark             = sk->sk_mark;
                tw->tw_priority         = sk->sk_priority;
                tw->tw_rcv_wscale       = tp->rx_opt.rcv_wscale;
                tcptw->tw_rcv_nxt       = tp->rcv_nxt;
                tcptw->tw_snd_nxt       = tp->snd_nxt;
                tcptw->tw_rcv_wnd       = tcp_receive_window(tp);
                tcptw->tw_ts_recent     = tp->rx_opt.ts_recent;
                tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
                tcptw->tw_ts_offset     = tp->tsoffset;
                tcptw->tw_last_oow_ack_time = 0;
                tcptw->tw_tx_delay      = tp->tcp_tx_delay;
#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);

                        tw->tw_v6_daddr = sk->sk_v6_daddr;
                        tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
                        tw->tw_tclass = np->tclass;
                        tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
                        tw->tw_txhash = sk->sk_txhash;
                        tw->tw_ipv6only = sk->sk_ipv6only;
                }
#endif

#ifdef CONFIG_TCP_MD5SIG
                /*
                 * The timewait bucket does not have the key DB from the
                 * sock structure. We just make a quick copy of the
                 * md5 key being used (if indeed we are using one)
                 * so the timewait ack generating code has the key.
                 */
                do {
                        tcptw->tw_md5_key = NULL;
                        if (static_branch_unlikely(&tcp_md5_needed)) {
                                struct tcp_md5sig_key *key;

                                key = tp->af_specific->md5_lookup(sk, sk);
                                if (key) {
                                        tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
                                        BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
                                }
                        }
                } while (0);
#endif

                /* Get the TIME_WAIT timeout firing. */
                if (timeo < rto)
                        timeo = rto;

                if (state == TCP_TIME_WAIT)
                        timeo = TCP_TIMEWAIT_LEN;
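
                /* In other words (assuming the usual TCP_TIMEWAIT_LEN of
                 * 60*HZ): a socket parked here as a real TIME-WAIT always
                 * sleeps for the full 60 seconds, while a "dead" FIN-WAIT-2
                 * keeps the caller-supplied timeout (typically derived from
                 * the fin-wait-2 timeout), floored at 3.5*RTO so it is never
                 * shorter than the retransmission horizon computed above.
                 */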

                /* tw_timer is pinned, so we need to make sure BH are disabled
                 * in following section, otherwise timer handler could run before
                 * we complete the initialization.
                 */
                local_bh_disable();
                inet_twsk_schedule(tw, timeo);
                /* Linkage updates.
                 * Note that access to tw after this point is illegal.
                 */
                inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
                local_bh_enable();
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
                 * non-graceful socket closings.
                 */
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
        }

        tcp_update_metrics(sk);
        tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
        if (static_branch_unlikely(&tcp_md5_needed)) {
                struct tcp_timewait_sock *twsk = tcp_twsk(sk);

                if (twsk->tw_md5_key)
                        kfree_rcu(twsk->tw_md5_key, rcu);
        }
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
                           const struct sock *sk_listener,
                           const struct dst_entry *dst)
{
        struct inet_request_sock *ireq = inet_rsk(req);
        const struct tcp_sock *tp = tcp_sk(sk_listener);
        int full_space = tcp_full_space(sk_listener);
        u32 window_clamp;
        __u8 rcv_wscale;
        u32 rcv_wnd;
        int mss;

        mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
        window_clamp = READ_ONCE(tp->window_clamp);
        /* Set this up on the first call only */
        req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

        /* limit the window selection if the user enforces a smaller rx buffer */
        if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
            (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
                req->rsk_window_clamp = full_space;

        rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
        if (rcv_wnd == 0)
                rcv_wnd = dst_metric(dst, RTAX_INITRWND);
        else if (full_space < rcv_wnd * mss)
                full_space = rcv_wnd * mss;

        /* Use tcp_full_space() here because this is guaranteed to be the
         * first packet of the connection.
         */
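        /* A small worked example of the MSS argument below (values for
         * illustration only): with an advertised MSS of 1460 and timestamps
         * negotiated, TCPOLEN_TSTAMP_ALIGNED (12 bytes) is subtracted, so the
         * window is sized for 1448-byte payloads per segment; without
         * timestamps the full 1460 is used.
         */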
        tcp_select_initial_window(sk_listener, full_space,
                                  mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
                                  &req->rsk_rcv_wnd,
                                  &req->rsk_window_clamp,
                                  ireq->wscale_ok,
                                  &rcv_wscale,
                                  rcv_wnd);
        ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
                                  const struct request_sock *req)
{
        tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
        bool ca_got_dst = false;

        if (ca_key != TCP_CA_UNSPEC) {
                const struct tcp_congestion_ops *ca;

                rcu_read_lock();
                ca = tcp_ca_find_key(ca_key);
                if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
                        icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
                        icsk->icsk_ca_ops = ca;
                        ca_got_dst = true;
                }
                rcu_read_unlock();
        }

        /* If no valid choice made yet, assign current system default ca. */
        if (!ca_got_dst &&
            (!icsk->icsk_ca_setsockopt ||
             !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
                tcp_assign_congestion_control(sk);

        tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
                                    struct request_sock *req,
                                    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
        struct inet_request_sock *ireq;

        if (static_branch_unlikely(&tcp_have_smc)) {
                ireq = inet_rsk(req);
                if (oldtp->syn_smc && !ireq->smc_ok)
                        newtp->syn_smc = 0;
        }
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. The tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
                                      struct request_sock *req,
                                      struct sk_buff *skb)
{
        struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct tcp_request_sock *treq = tcp_rsk(req);
        struct inet_connection_sock *newicsk;
        struct tcp_sock *oldtp, *newtp;
        u32 seq;

        if (!newsk)
                return NULL;

        newicsk = inet_csk(newsk);
        newtp = tcp_sk(newsk);
        oldtp = tcp_sk(sk);

        smc_check_reset_syn_req(oldtp, req, newtp);

        /* Now setup tcp_sock */
        newtp->pred_flags = 0;

        seq = treq->rcv_isn + 1;
        newtp->rcv_wup = seq;
        WRITE_ONCE(newtp->copied_seq, seq);
        WRITE_ONCE(newtp->rcv_nxt, seq);
        newtp->segs_in = 1;

        seq = treq->snt_isn + 1;
        newtp->snd_sml = newtp->snd_una = seq;
        WRITE_ONCE(newtp->snd_nxt, seq);
        newtp->snd_up = seq;

        INIT_LIST_HEAD(&newtp->tsq_node);
        INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

        tcp_init_wl(newtp, treq->rcv_isn);

        minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
        newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

        newtp->lsndtime = tcp_jiffies32;
        newsk->sk_txhash = treq->txhash;
        newtp->total_retrans = req->num_retrans;

        tcp_init_xmit_timers(newsk);
        WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

        if (sock_flag(newsk, SOCK_KEEPOPEN))
                inet_csk_reset_keepalive_timer(newsk,
                                               keepalive_time_when(newtp));

        newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
        newtp->rx_opt.sack_ok = ireq->sack_ok;
        newtp->window_clamp = req->rsk_window_clamp;
        newtp->rcv_ssthresh = req->rsk_rcv_wnd;
        newtp->rcv_wnd = req->rsk_rcv_wnd;
        newtp->rx_opt.wscale_ok = ireq->wscale_ok;
        if (newtp->rx_opt.wscale_ok) {
                newtp->rx_opt.snd_wscale = ireq->snd_wscale;
                newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
        } else {
                newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
                newtp->window_clamp = min(newtp->window_clamp, 65535U);
        }
        newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
        newtp->max_window = newtp->snd_wnd;

        if (newtp->rx_opt.tstamp_ok) {
                newtp->rx_opt.ts_recent = req->ts_recent;
                newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
                newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
        } else {
                newtp->rx_opt.ts_recent_stamp = 0;
                newtp->tcp_header_len = sizeof(struct tcphdr);
        }
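
        /* Size check on the header length just chosen (current layout sizes,
         * shown for illustration): sizeof(struct tcphdr) is 20, so
         * tcp_header_len becomes 32 when timestamps were negotiated and 20
         * otherwise; the MD5 branch below adds another TCPOLEN_MD5SIG_ALIGNED
         * (20) bytes when a key is configured for this peer.
         */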
        if (req->num_timeout) {
                newtp->undo_marker = treq->snt_isn;
                newtp->retrans_stamp = div_u64(treq->snt_synack,
                                               USEC_PER_SEC / TCP_TS_HZ);
        }
        newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
        newtp->md5sig_info = NULL;      /*XXX*/
        if (newtp->af_specific->md5_lookup(sk, newsk))
                newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
        if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
                newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
        newtp->rx_opt.mss_clamp = req->mss;
        tcp_ecn_openreq_child(newtp, req);
        newtp->fastopen_req = NULL;
        RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

        tcp_bpf_clone(sk, newsk);

        __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

        return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req,
                           bool fastopen, bool *req_stolen)
{
        struct tcp_options_received tmp_opt;
        struct sock *child;
        const struct tcphdr *th = tcp_hdr(skb);
        __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        bool paws_reject = false;
        bool own_req;

        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(struct tcphdr)>>2)) {
                tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

                if (tmp_opt.saw_tstamp) {
                        tmp_opt.ts_recent = req->ts_recent;
                        if (tmp_opt.rcv_tsecr)
                                tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
                        /* We do not store the true stamp, but it is not
                         * required; it can be estimated (approximately)
                         * from other data.
                         */
                        tmp_opt.ts_recent_stamp = ktime_get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
                }
        }
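
        /* The estimate above in concrete numbers (TCP_TIMEOUT_INIT is 1*HZ,
         * i.e. one second): after two SYNACK retransmissions, num_timeout is
         * 2 and ts_recent_stamp is back-dated by 1 << 2 == 4 seconds, roughly
         * when the timestamp carried in the original SYN would have been
         * seen. That is close enough for the PAWS age check, which only
         * cares about the 24-day TCP_PAWS_24DAYS horizon.
         */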

        /* Check for pure retransmitted SYN. */
        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
            flg == TCP_FLAG_SYN &&
            !paws_reject) {
                /*
                 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
                 * this case on figure 6 and figure 8, but the formal
                 * protocol description says NOTHING.
                 * To be more exact, it says that we should send an ACK,
                 * because this segment (at least, if it has no data)
                 * is out of window.
                 *
                 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
                 * describe SYN-RECV state. All the description
                 * is wrong, we cannot believe it and should
                 * rely only on common sense and implementation
                 * experience.
                 *
                 * Enforce "SYN-ACK" according to figure 8, figure 6
                 * of RFC793, fixed by RFC1122.
                 *
                 * Note that even if there is new data in the SYN packet
                 * it will be thrown away too.
                 *
                 * Reset the timer after retransmitting the SYNACK, similar
                 * to the idea of fast retransmit in recovery.
                 */
                if (!tcp_oow_rate_limited(sock_net(sk), skb,
                                          LINUX_MIB_TCPACKSKIPPEDSYNRECV,
                                          &tcp_rsk(req)->last_oow_ack_time) &&
                    !inet_rtx_syn_ack(sk, req)) {
                        unsigned long expires = jiffies;

                        expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
                                       TCP_RTO_MAX);
                        if (!fastopen)
                                mod_timer_pending(&req->rsk_timer, expires);
                        else
                                req->rsk_timer.expires = expires;
                }
                return NULL;
        }

        /* Further reproduces section "SEGMENT ARRIVES"
           for state SYN-RECEIVED of RFC793.
           It is broken, however; it fails only when SYNs are crossed.

           You would think that SYN crossing is impossible here, since
           we should have a SYN_SENT socket (from connect()) on our end,
           but this is not true if the crossed SYNs were sent to both
           ends by a malicious third party. We must defend against this,
           and to do that we first verify the ACK (as per RFC793, page
           36) and reset if it is invalid. Is this a true full defense?
           To convince ourselves, let us consider a way in which the ACK
           test can still pass in this 'malicious crossed SYNs' case.
           Malicious sender sends identical SYNs (and thus identical sequence
           numbers) to both A and B:

             A: gets SYN, seq=7
             B: gets SYN, seq=7

           By our good fortune, both A and B select the same initial
           send sequence number of seven :-)

             A: sends SYN|ACK, seq=7, ack_seq=8
             B: sends SYN|ACK, seq=7, ack_seq=8

           So we are now A eating this SYN|ACK, the ACK test passes. So
           does the sequence test, the SYN is truncated, and thus we consider
           it a bare ACK.

           If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
           bare ACK.  Otherwise, we create an established connection.  Both
           ends (listening sockets) accept the new incoming connection and try
           to talk to each other. 8-)

           Note: This case is both harmless, and rare.  The possibility is
           about the same as us discovering intelligent life on another
           planet tomorrow.

           But generally, we should (the RFC lies!) accept an ACK
           from a SYNACK both here and in tcp_rcv_state_process().
           tcp_rcv_state_process() does not, hence, we do not too.

           Note that the case is absolutely generic:
           we cannot optimize anything here without
           violating protocol. All the checks must be made
           before an attempt to create a socket.
         */

        /* RFC793 page 36: "If the connection is in any non-synchronized state ...
         * and the incoming segment acknowledges something not yet
         * sent (the segment carries an unacceptable ACK) ...
         * a reset is sent."
         *
         * Invalid ACK: reset will be sent by listening socket.
         * Note that the ACK validity check for a Fast Open socket is done
         * elsewhere and is checked directly against the child socket rather
         * than req because user data may have been sent out.
         */
        if ((flg & TCP_FLAG_ACK) && !fastopen &&
            (TCP_SKB_CB(skb)->ack_seq !=
             tcp_rsk(req)->snt_isn + 1))
                return sk;

        /* Also, it would not be such a bad idea to check rcv_tsecr, which
         * is essentially an ACK extension; too-early or too-late values
         * should cause a reset in unsynchronized states.
         */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) /* RFC793: "first check sequence number". */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) /* Out of window: send ACK and drop. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) if (!(flg & TCP_FLAG_RST) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) !tcp_oow_rate_limited(sock_net(sk), skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) LINUX_MIB_TCPACKSKIPPEDSYNRECV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) &tcp_rsk(req)->last_oow_ack_time))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) req->rsk_ops->send_ack(sk, skb, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) if (paws_reject)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) __NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) /* In sequence, PAWS is OK. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) req->ts_recent = tmp_opt.rcv_tsval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) /* Truncate SYN, it is out of window starting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) at tcp_rsk(req)->rcv_isn + 1. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) flg &= ~TCP_FLAG_SYN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) /* RFC793: "second check the RST bit" and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * "fourth, check the SYN bit"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) __TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) goto embryonic_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) /* ACK sequence verified above, just make sure ACK is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * set. If ACK not set, just silently drop the packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * XXX (TFO) - if we ever allow "data after SYN", the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) * following check needs to be removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) if (!(flg & TCP_FLAG_ACK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) /* For Fast Open, no further processing is needed here (sk is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * child socket).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) if (fastopen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) return sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) inet_rsk(req)->acked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) }
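/* Illustrative note (not part of the original file): defer-accept is
 * armed from user space on the listening socket, e.g.
 *
 *	int secs = 5;
 *	setsockopt(lfd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs));
 *
 * where lfd is a hypothetical listener fd. The timeout in seconds is
 * translated into a SYN-ACK retransmission count, and bare ACKs are
 * dropped above until either data arrives or that count is exhausted.
 */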
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) /* OK, the ACK is valid: create the full socket and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * feed this segment to it. It will repeat all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * the checks. THIS SEGMENT MUST MOVE THE SOCKET TO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * ESTABLISHED STATE. If it gets dropped after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * socket is created, expect trouble.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) req, &own_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (!child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) goto listen_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
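/* Descriptive note (added for clarity): even after a child is created,
 * the request may ask to be dropped via rsk_drop_req(); MPTCP subflow
 * requests, for example, can be marked this way. In that case the child
 * is still returned, but the request is removed from the listener's
 * queues below.
 */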
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (own_req && rsk_drop_req(req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) inet_csk_reqsk_queue_drop_and_put(sk, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) return child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) sock_rps_save_rxhash(child, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) tcp_synack_rtt_meas(child, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) *req_stolen = !own_req;
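/* Descriptive note (added for clarity): inet_csk_complete_hashdance()
 * drops the request from the SYN queue and, when we own the request,
 * adds the child to the listener's accept queue; if another context
 * already took ownership, the child is released and NULL is returned.
 */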
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) return inet_csk_complete_hashdance(sk, child, req, own_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) listen_overflow:
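/* Descriptive note (added for clarity): with tcp_abort_on_overflow left
 * at its default of 0 we only mark the request as acked and rely on the
 * client retransmitting its ACK; when the sysctl is set we fall through
 * to embryonic_reset and send a reset instead.
 */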
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) inet_rsk(req)->acked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) embryonic_reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (!(flg & TCP_FLAG_RST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) /* Received a bad SYN pkt - for TFO we try not to reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * the local connection unless it's really necessary, so as to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * avoid becoming vulnerable to an outside attack aimed at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * resetting legitimate local connections.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) req->rsk_ops->send_reset(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) } else if (fastopen) { /* received a valid RST pkt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) reqsk_fastopen_remove(sk, req, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) tcp_reset(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if (!fastopen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (unlinked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) *req_stolen = !unlinked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) EXPORT_SYMBOL(tcp_check_req);
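/* Illustrative, simplified sketch (not part of the original file) of how
 * the IPv4 receive path typically uses tcp_check_req() for a non-Fast-Open
 * request; names are abbreviated and error handling is omitted:
 *
 *	bool req_stolen;
 *	struct sock *nsk = tcp_check_req(listener, skb, req, false, &req_stolen);
 *
 *	if (!nsk)
 *		;			// segment answered or dropped here
 *	else if (nsk != listener)
 *		tcp_child_process(listener, nsk, skb);	// hand off to the child
 */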
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * Queue the segment on the new socket's backlog if that socket is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * currently busy (owned by user context); otherwise we just short-circuit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * this and process the segment on the new socket right away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * when entering. But other states are possible due to a race condition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * where, after __inet_lookup_established() fails but before the listener
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * lock is obtained, other packets cause the same connection to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * be created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) int tcp_child_process(struct sock *parent, struct sock *child,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) __releases(&((child)->sk_lock.slock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) int state = child->sk_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /* record NAPI ID of child */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) sk_mark_napi_id(child, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) tcp_segs_in(tcp_sk(child), skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (!sock_owned_by_user(child)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) ret = tcp_rcv_state_process(child, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) /* Wake up the parent, send SIGIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (state == TCP_SYN_RECV && child->sk_state != state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) parent->sk_data_ready(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /* Alas, this is possible again, because we look the socket up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * in the main socket hash table and the lock on the listening
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * socket no longer protects us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) __sk_add_backlog(child, skb);
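/* Descriptive note (added for clarity): backlogged segments are replayed
 * through sk_backlog_rcv() (tcp_v4_do_rcv()/tcp_v6_do_rcv()) once the
 * owner calls release_sock().
 */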
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) bh_unlock_sock(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) sock_put(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) EXPORT_SYMBOL(tcp_child_process);
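/* Illustrative, simplified caller sketch (not part of the original file):
 * when tcp_check_req() hands back a freshly created child, the listener
 * path runs roughly
 *
 *	if (nsk != sk) {
 *		if (tcp_child_process(sk, nsk, skb))
 *			goto reset;	// child asked for a reset
 *		return 0;
 *	}
 *
 * i.e. a non-zero return value from tcp_child_process() means the segment
 * must be answered with a reset on the child socket.
 */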