// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}
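
/* Illustrative note (hypothetical values, not from this file): two skbs may
 * carry the same send timestamp, e.g. when a burst is timestamped from the
 * same cached clock sample. With t1 == t2, the after(seq1, seq2) tie-break
 * still orders the skb ending at seq 2000 after the one ending at seq 1000,
 * so RACK treats the higher-sequence skb as the more recently sent one.
 */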

static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->reord_seen) {
		/* If reordering has not been observed, be aggressive
		 * during recovery, or when starting recovery via the
		 * DUPACK threshold.
		 */
		if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
			return 0;

		if (tp->sacked_out >= tp->reordering &&
		    !(sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_NO_DUPTHRESH))
			return 0;
	}

	/* To be more reordering resilient, allow min_rtt/4 settling delay.
	 * Use min_rtt instead of the smoothed RTT because reordering is
	 * often a path property and less related to queuing or delayed ACKs.
	 * Upon receiving DSACKs, linearly increase the window up to the
	 * smoothed RTT.
	 */
	return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
		   tp->srtt_us >> 3);
}
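
/* Worked example with hypothetical numbers (not taken from this file): with
 * tcp_min_rtt(tp) == 40000 us and reo_wnd_steps == 1, the window is
 * (40000 >> 2) * 1 = 10000 us. After two DSACK-driven increments
 * (reo_wnd_steps == 3) it grows to 30000 us. With srtt_us == 400000
 * (i.e. an SRTT of 50000 us stored left-shifted by 3), the cap is
 * srtt_us >> 3 == 50000 us, so min(30000, 50000) = 30000 us.
 */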

/* Time (in us) remaining before @skb can be declared lost by RACK: the RACK
 * RTT plus the reordering window, minus how long ago the skb was (re)sent.
 * A result <= 0 means the skb has already expired.
 */
s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
{
	return tp->rack.rtt_us + reo_wnd -
	       tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
}
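
/* Illustrative arithmetic (hypothetical values): if rack.rtt_us == 20000 us,
 * reo_wnd == 5000 us and the skb was sent 30000 us ago, the remaining time is
 * 20000 + 5000 - 30000 = -5000 us, so the skb is past its deadline and can be
 * marked lost. If it was sent 18000 us ago instead, 7000 us remain and the
 * caller waits at least that long before re-checking.
 */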

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and is therefore robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	reo_wnd = tcp_rack_reo_wnd(sk);
	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip ones marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

		if (!tcp_rack_sent_after(tp->rack.mstamp,
					 tcp_skb_timestamp_us(skb),
					 tp->rack.end_seq, scb->end_seq))
			break;

		/* A packet is lost if it is still not s/acked after the
		 * most recent RTT plus the reordering window have elapsed
		 * since it was sent.
		 */
		remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
		if (remaining <= 0) {
			tcp_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record maximum wait time */
			*reo_timeout = max_t(u32, *reo_timeout, remaining);
		}
	}
}
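
/* Illustrative walk-through (hypothetical scenario): skbs A and B are still
 * outstanding and C, sent after both, is the most recently (s)acked skb, so
 * rack.mstamp/end_seq describe C. Scanning the tsorted list in send order,
 * A was sent long enough ago that its remaining time is <= 0: it is marked
 * lost and dropped from the tsorted list. B is newer, so its positive
 * remaining time is recorded in *reo_timeout for the REO timer. Any skb sent
 * after C fails tcp_rack_sent_after() and terminates the scan.
 */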

/* Mark packets lost per the RACK algorithm and, if some packets are still
 * within the reordering window, arm the REO timer for the longest remaining
 * wait. Returns true iff the timer was armed.
 */
bool tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return false;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
	return !!timeout;
}
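
/* Illustrative timer arithmetic (hypothetical values): a remaining wait of
 * 7000 us becomes usecs_to_jiffies(7000) == 7 jiffies at HZ=1000, plus
 * TCP_TIMEOUT_MIN, and inet_csk_reset_xmit_timer() additionally bounds the
 * expiry by icsk_rto, so the REO timer never fires later than the RTO would.
 */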

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or a prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., the retransmission is at
		 * least an RTT later).
		 */
		return;
	}
	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
				end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
}
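
/* Illustrative case (hypothetical timeline): a segment sent at t=0 ms is
 * retransmitted at t=100 ms, and a SACK covering it arrives at t=102 ms.
 * Measured against the retransmission's xmit_time, rtt_us is 2000 us; if
 * tcp_min_rtt() is 40000 us, the 2 ms sample is implausibly small for the
 * retransmission and most likely acknowledges the original, so it is
 * discarded rather than used to advance RACK state.
 */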

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}
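
/* Illustrative flow (hypothetical scenario): the REO timer fires while the
 * connection is still outside CA_Recovery. tcp_rack_detect_loss() now marks
 * an expired skb lost, so packets_in_flight() drops, recovery is entered,
 * and a congestion control without its own cong_control hook gets a
 * PRR-style cwnd reduction before the lost data is retransmitted.
 */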

/* Update RACK's reo_wnd based on DSACK and the number of recoveries.
 *
 * If a DSACK is received, increment reo_wnd by min_rtt/4 (upper bounded
 * by srtt), since there is a possibility that the spurious retransmission
 * was due to a reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * successful recoveries (accounting for a full DSACK-based loss recovery
 * undo). After that, reset it to the default (min_rtt/4).
 *
 * reo_wnd is incremented at most once per RTT, so that the DSACK we are
 * reacting to is (approximately) due to a spurious retransmission sent
 * after reo_wnd was last updated.
 *
 * reo_wnd is tracked in steps (of min_rtt/4), rather than as an absolute
 * value, to account for changes in the RTT.
 */
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
	    !rs->prior_delivered)
		return;

	/* Disregard DSACK if an RTT has not passed since we adjusted reo_wnd */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if an update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}
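
/* Illustrative sequence (hypothetical): reo_wnd_steps starts at 1. A DSACK
 * is seen on an ACK whose rate sample started after the previous adjustment
 * (rs->prior_delivered is not before rack.last_delivered), so steps become 2
 * and reo_wnd_persist is reset to TCP_RACK_RECOVERY_THRESH. If the following
 * recoveries complete without another qualifying DSACK, reo_wnd_persist
 * eventually hits zero (it is decremented outside this function) and steps
 * fall back to 1, i.e. reo_wnd returns to min_rtt/4.
 */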

/* RFC6582 NewReno recovery for non-SACK connections. It simply retransmits
 * the next unacked packet upon receiving
 * a) three or more DUPACKs to start the fast recovery
 * b) an ACK acknowledging new data during the fast recovery.
 */
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
{
	const u8 state = inet_csk(sk)->icsk_ca_state;
	struct tcp_sock *tp = tcp_sk(sk);

	if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
	    (state == TCP_CA_Recovery && snd_una_advanced)) {
		struct sk_buff *skb = tcp_rtx_queue_head(sk);
		u32 mss;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
			return;

		mss = tcp_skb_mss(skb);
		if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
			tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
				     mss, mss, GFP_ATOMIC);

		tcp_mark_skb_lost(sk, skb);
	}
}
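
/* Illustrative case (hypothetical): on a non-SACK flow with reordering == 3,
 * the third DUPACK makes tp->sacked_out (emulated from DUPACK counting) reach
 * the threshold while still outside recovery. If the head of the rtx queue is
 * a 3*MSS GSO skb, tcp_fragment() first splits off one MSS so that only that
 * first MSS-sized piece is marked lost and retransmitted, rather than the
 * whole GSO skb.
 */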