/* Bottleneck Bandwidth and RTT (BBR) congestion control
 *
 * BBR congestion control computes the sending rate based on the delivery
 * rate (throughput) estimated from ACKs. In a nutshell:
 *
 *   On each ACK, update our model of the network path:
 *      bottleneck_bandwidth = windowed_max(delivered / elapsed, 10 round trips)
 *      min_rtt = windowed_min(rtt, 10 seconds)
 *   pacing_rate = pacing_gain * bottleneck_bandwidth
 *   cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
 *
 * The core algorithm does not react directly to packet losses or delays,
 * although BBR may adjust the size of next send per ACK when loss is
 * observed, or adjust the sending rate if it estimates there is a
 * traffic policer, in order to keep the drop rate reasonable.
 *
 * Here is a state transition diagram for BBR:
 *
 *             |
 *             V
 *    +---> STARTUP  ----+
 *    |        |         |
 *    |        V         |
 *    |      DRAIN   ----+
 *    |        |         |
 *    |        V         |
 *    +---> PROBE_BW ----+
 *    |      ^    |      |
 *    |      |    |      |
 *    |      +----+      |
 *    |                  |
 *    +---- PROBE_RTT <--+
 *
 * A BBR flow starts in STARTUP, and ramps up its sending rate quickly.
 * When it estimates the pipe is full, it enters DRAIN to drain the queue.
 * In steady state a BBR flow only uses PROBE_BW and PROBE_RTT.
 * A long-lived BBR flow spends the vast majority of its time remaining
 * (repeatedly) in PROBE_BW, fully probing and utilizing the pipe's bandwidth
 * in a fair manner, with a small, bounded queue. *If* a flow has been
 * continuously sending for the entire min_rtt window, and hasn't seen an RTT
 * sample that matches or decreases its min_rtt estimate for 10 seconds, then
 * it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe
 * the path's two-way propagation delay (min_rtt). When exiting PROBE_RTT, if
 * we estimated that we reached the full bw of the pipe then we enter PROBE_BW;
 * otherwise we enter STARTUP to try to fill the pipe.
 *
 * BBR is described in detail in:
 *   "BBR: Congestion-Based Congestion Control",
 *   Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh,
 *   Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016.
 *
 * There is a public e-mail list for discussing BBR development and testing:
 *   https://groups.google.com/forum/#!forum/bbr-dev
 *
 * NOTE: BBR might be used with the fq qdisc ("man tc-fq") with pacing enabled,
 * otherwise TCP stack falls back to an internal pacing using one high
 * resolution timer per TCP socket and may use more resources.
 */
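
/* A worked example of the model above (illustrative numbers only, not taken
 * from the paper): assume ACKs show 5000 packets delivered over an elapsed
 * 40 ms, and the lowest RTT seen in the last 10 seconds is 10 ms. Then:
 *
 *   bottleneck_bandwidth = 5000 pkts / 40 ms = 125 pkts/ms
 *   min_rtt              = 10 ms
 *   pacing_rate          = pacing_gain * 125 pkts/ms
 *                          (= 125 pkts/ms when cruising at pacing_gain = 1.0)
 *   cwnd                 = max(cwnd_gain * 125 pkts/ms * 10 ms, 4)
 *                        = max(2 * 1250, 4) = 2500 packets
 *
 * using the steady-state cwnd_gain of 2 defined below (bbr_cwnd_gain).
 */
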
#include <linux/module.h>
#include <net/tcp.h>
#include <linux/inet_diag.h>
#include <linux/inet.h>
#include <linux/random.h>
#include <linux/win_minmax.h>

/* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
 * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
 * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
 * Since the minimum window is >=4 packets, the lower bound isn't
 * an issue. The upper bound isn't an issue with existing technologies.
 */
#define BW_SCALE 24
#define BW_UNIT (1 << BW_SCALE)
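
/* Worked example for the scaling above (assuming a 1500 byte MSS, as the
 * comment does): the smallest nonzero rate, 1 in this unit, is
 * 2^-24 pkts/usec = 10^6 / 2^24 ~= 0.06 pkts/sec, i.e. about
 * 0.06 * 1500 * 8 ~= 715 bits/sec. The largest rate a u32 can hold,
 * (2^32 - 1) * 2^-24 ~= 256 pkts/usec = 256M pkts/sec, is about
 * 256e6 * 1500 * 8 ~= 3 Tbit/sec.
 */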

#define BBR_SCALE 8	/* scaling factor for fractions in BBR (e.g. gains) */
#define BBR_UNIT (1 << BBR_SCALE)

/* BBR has the following modes for deciding how fast to send: */
enum bbr_mode {
	BBR_STARTUP,	/* ramp up sending rate rapidly to fill pipe */
	BBR_DRAIN,	/* drain any queue created during startup */
	BBR_PROBE_BW,	/* discover, share bw: pace around estimated bw */
	BBR_PROBE_RTT,	/* cut inflight to min to probe min_rtt */
};

/* BBR congestion control block */
struct bbr {
	u32	min_rtt_us;	        /* min RTT in min_rtt_win_sec window */
	u32	min_rtt_stamp;	        /* timestamp of min_rtt_us */
	u32	probe_rtt_done_stamp;   /* end time for BBR_PROBE_RTT mode */
	struct minmax bw;	/* Max recent delivery rate in pkts/uS << 24 */
	u32	rtt_cnt;	    /* count of packet-timed rounds elapsed */
	u32	next_rtt_delivered; /* scb->tx.delivered at end of round */
	u64	cycle_mstamp;	     /* time of this cycle phase start */
	u32	mode:3,		     /* current bbr_mode in state machine */
		prev_ca_state:3,     /* CA state on previous ACK */
		packet_conservation:1,  /* use packet conservation? */
		round_start:1,	     /* start of packet-timed tx->ack round? */
		idle_restart:1,	     /* restarting after idle? */
		probe_rtt_round_done:1,  /* a BBR_PROBE_RTT round at 4 pkts? */
		unused:13,
		lt_is_sampling:1,    /* taking long-term ("LT") samples now? */
		lt_rtt_cnt:7,	     /* round trips in long-term interval */
		lt_use_bw:1;	     /* use lt_bw as our bw estimate? */
	u32	lt_bw;		     /* LT est delivery rate in pkts/uS << 24 */
	u32	lt_last_delivered;   /* LT intvl start: tp->delivered */
	u32	lt_last_stamp;	     /* LT intvl start: tp->delivered_mstamp */
	u32	lt_last_lost;	     /* LT intvl start: tp->lost */
	u32	pacing_gain:10,	/* current gain for setting pacing rate */
		cwnd_gain:10,	/* current gain for setting cwnd */
		full_bw_reached:1,   /* reached full bw in Startup? */
		full_bw_cnt:2,	/* number of rounds without large bw gains */
		cycle_idx:3,	/* current index in pacing_gain cycle array */
		has_seen_rtt:1, /* have we seen an RTT sample yet? */
		unused_b:5;
	u32	prior_cwnd;	/* prior cwnd upon entering loss recovery */
	u32	full_bw;	/* recent bw, to estimate if pipe is full */

	/* For tracking ACK aggregation: */
	u64	ack_epoch_mstamp;	/* start of ACK sampling epoch */
	u16	extra_acked[2];		/* max excess data ACKed in epoch */
	u32	ack_epoch_acked:20,	/* packets (S)ACKed in sampling epoch */
		extra_acked_win_rtts:5,	/* age of extra_acked, in round trips */
		extra_acked_win_idx:1,	/* current index in extra_acked array */
		unused_c:6;
};

#define CYCLE_LEN	8	/* number of phases in a pacing gain cycle */

/* Window length of bw filter (in rounds): */
static const int bbr_bw_rtts = CYCLE_LEN + 2;
/* Window length of min_rtt filter (in sec): */
static const u32 bbr_min_rtt_win_sec = 10;
/* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode: */
static const u32 bbr_probe_rtt_mode_ms = 200;
/* Skip TSO below the following bandwidth (bits/sec): */
static const int bbr_min_tso_rate = 1200000;

/* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck.
 * In order to help drive the network toward lower queues and low latency while
 * maintaining high utilization, the average pacing rate aims to be slightly
 * lower than the estimated bandwidth. This is an important aspect of the
 * design.
 */
static const int bbr_pacing_margin_percent = 1;

/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
 * that will allow a smoothly increasing pacing rate that will double each RTT
 * and send the same number of packets per RTT that an un-paced, slow-starting
 * Reno or CUBIC flow would:
 */
static const int bbr_high_gain  = BBR_UNIT * 2885 / 1000 + 1;
/* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
 * the queue created in BBR_STARTUP in a single round:
 */
static const int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
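
/* In BBR_UNIT fixed point the two gains above work out as follows (this
 * follows directly from the definitions): 2/ln(2) ~= 2.885, so
 * bbr_high_gain  = 256 * 2885 / 1000 + 1 = 739, i.e. 739/256 ~= 2.89, and
 * bbr_drain_gain = 256 * 1000 / 2885     =  88, i.e.  88/256 ~= 0.34 ~= 1/2.9.
 */
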
/* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs: */
static const int bbr_cwnd_gain  = BBR_UNIT * 2;
/* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw: */
static const int bbr_pacing_gain[] = {
	BBR_UNIT * 5 / 4,	/* probe for more available bw */
	BBR_UNIT * 3 / 4,	/* drain queue and/or yield bw to other flows */
	BBR_UNIT, BBR_UNIT, BBR_UNIT,	/* cruise at 1.0*bw to utilize pipe, */
	BBR_UNIT, BBR_UNIT, BBR_UNIT	/* without creating excess queue... */
};
/* Randomize the starting gain cycling phase over N phases: */
static const u32 bbr_cycle_rand = 7;
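
/* Expressed as real gains, the cycle above is {1.25, 0.75, 1, 1, 1, 1, 1, 1}:
 * one phase that probes for more bandwidth, one that drains whatever queue
 * the probe may have built, and six cruising phases. bbr_reset_probe_bw_mode()
 * below starts the cycle at a randomized phase (which, given the arithmetic
 * there, is never the 0.75 drain phase), so competing BBR flows tend not to
 * probe in lock-step.
 */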

/* Try to keep at least this many packets in flight, if things go smoothly. For
 * smooth functioning, a sliding window protocol ACKing every other packet
 * needs at least 4 packets in flight:
 */
static const u32 bbr_cwnd_min_target = 4;

/* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */
/* If bw has increased significantly (1.25x), there may be more bw available: */
static const u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
/* But after 3 rounds w/o significant bw growth, estimate pipe is full: */
static const u32 bbr_full_bw_cnt = 3;

/* "long-term" ("LT") bandwidth estimator parameters... */
/* The minimum number of rounds in an LT bw sampling interval: */
static const u32 bbr_lt_intvl_min_rtts = 4;
/* If lost/delivered ratio > 20%, interval is "lossy" and we may be policed: */
static const u32 bbr_lt_loss_thresh = 50;
/* If 2 intervals have a bw ratio <= 1/8, their bw is "consistent": */
static const u32 bbr_lt_bw_ratio = BBR_UNIT / 8;
/* If 2 intervals have a bw diff <= 4 Kbit/sec their bw is "consistent": */
static const u32 bbr_lt_bw_diff = 4000 / 8;
/* If we estimate we're policed, use lt_bw for this many round trips: */
static const u32 bbr_lt_bw_max_rtts = 48;
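
/* Worked out in plain units (these follow from the constants above):
 * bbr_lt_loss_thresh is 50 out of BBR_UNIT = 256, i.e. a lost/delivered
 * ratio of ~19.5% (~20%); bbr_lt_bw_ratio is 32/256 = 1/8; and
 * bbr_lt_bw_diff is 500 bytes/sec = 4000 bits/sec.
 */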

/* Gain factor for adding extra_acked to target cwnd: */
static const int bbr_extra_acked_gain = BBR_UNIT;
/* Window length of extra_acked window. */
static const u32 bbr_extra_acked_win_rtts = 5;
/* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */
static const u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20;
/* Time period for clamping cwnd increment due to ack aggregation */
static const u32 bbr_extra_acked_max_us = 100 * 1000;

static void bbr_check_probe_rtt_done(struct sock *sk);

/* Do we estimate that STARTUP filled the pipe? */
static bool bbr_full_bw_reached(const struct sock *sk)
{
	const struct bbr *bbr = inet_csk_ca(sk);

	return bbr->full_bw_reached;
}

/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
static u32 bbr_max_bw(const struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	return minmax_get(&bbr->bw);
}

/* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */
static u32 bbr_bw(const struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
}

/* Return maximum extra acked in past k-2k round trips,
 * where k = bbr_extra_acked_win_rtts.
 */
static u16 bbr_extra_acked(const struct sock *sk)
{
	struct bbr *bbr = inet_csk_ca(sk);

	return max(bbr->extra_acked[0], bbr->extra_acked[1]);
}

/* Return rate in bytes per second, optionally with a gain.
 * The order here is chosen carefully to avoid overflow of u64. This should
 * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
 */
static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
{
	unsigned int mss = tcp_sk(sk)->mss_cache;

	rate *= mss;
	rate *= gain;
	rate >>= BBR_SCALE;
	rate *= USEC_PER_SEC / 100 * (100 - bbr_pacing_margin_percent);
	return rate >> BW_SCALE;
}
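
/* A worked example of the conversion above, assuming mss = 1448 bytes and
 * gain = BBR_UNIT (1.0): a bw of 1.2 pkts/usec is stored as
 * 1.2 * 2^24 ~= 20132659. Then rate *= 1448, the gain multiply and the
 * >> BBR_SCALE cancel, rate *= 990000 (USEC_PER_SEC scaled by the 1% pacing
 * margin), and the final >> BW_SCALE yields ~1.72e9 bytes/sec, i.e. ~99% of
 * 1.2 Mpkt/sec * 1448 bytes ~= 1.74 GB/sec.
 */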

/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
static unsigned long bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
{
	u64 rate = bw;

	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
	return rate;
}

/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u64 bw;
	u32 rtt_us;

	if (tp->srtt_us) {		/* any RTT sample yet? */
		rtt_us = max(tp->srtt_us >> 3, 1U);
		bbr->has_seen_rtt = 1;
	} else {			 /* no RTT sample yet */
		rtt_us = USEC_PER_MSEC;	 /* use nominal default RTT */
	}
	bw = (u64)tp->snd_cwnd * BW_UNIT;
	do_div(bw, rtt_us);
	sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
}
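
/* Worked example for the initialization above, assuming an initial cwnd of
 * TCP_INIT_CWND = 10 and no RTT sample yet (so rtt_us = 1000): bw becomes
 * 10 * 2^24 / 1000 ~= 167772, i.e. 0.01 pkts/usec = 10000 pkts/sec. With an
 * assumed mss of 1448 bytes and the ~2.89 high gain, this paces at roughly
 * 10000 * 1448 * 2.89 * 0.99 ~= 41 MB/sec until real RTT samples arrive.
 */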

/* Pace using current bw estimate and a gain factor. */
static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	unsigned long rate = bbr_bw_to_pacing_rate(sk, bw, gain);

	if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
		bbr_init_pacing_rate_from_rtt(sk);
	if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
		sk->sk_pacing_rate = rate;
}

/* override sysctl_tcp_min_tso_segs */
static u32 bbr_min_tso_segs(struct sock *sk)
{
	return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
}

static u32 bbr_tso_segs_goal(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 segs, bytes;

	/* Sort of tcp_tso_autosize() but ignoring
	 * driver provided sk_gso_max_size.
	 */
	bytes = min_t(unsigned long,
		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
		      GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
	segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));

	return min(segs, 0x7FU);
}

/* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
static void bbr_save_cwnd(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
		bbr->prior_cwnd = tp->snd_cwnd;	/* this cwnd is good enough */
	else	/* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
		bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
}

static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);

	if (event == CA_EVENT_TX_START && tp->app_limited) {
		bbr->idle_restart = 1;
		bbr->ack_epoch_mstamp = tp->tcp_mstamp;
		bbr->ack_epoch_acked = 0;
		/* Avoid pointless buffer overflows: pace at est. bw if we don't
		 * need more speed (we're restarting from idle and app-limited).
		 */
		if (bbr->mode == BBR_PROBE_BW)
			bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
		else if (bbr->mode == BBR_PROBE_RTT)
			bbr_check_probe_rtt_done(sk);
	}
}

/* Calculate bdp based on min RTT and the estimated bottleneck bandwidth:
 *
 * bdp = ceil(bw * min_rtt * gain)
 *
 * The key factor, gain, controls the amount of queue. While a small gain
 * builds a smaller queue, it becomes more vulnerable to noise in RTT
 * measurements (e.g., delayed ACKs or other ACK compression effects). This
 * noise may cause BBR to under-estimate the rate.
 */
static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
{
	struct bbr *bbr = inet_csk_ca(sk);
	u32 bdp;
	u64 w;

	/* If we've never had a valid RTT sample, cap cwnd at the initial
	 * default. This should only happen when the connection is not using TCP
	 * timestamps and has retransmitted all of the SYN/SYNACK/data packets
	 * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
	 * case we need to slow-start up toward something safe: TCP_INIT_CWND.
	 */
	if (unlikely(bbr->min_rtt_us == ~0U))	 /* no valid RTT samples yet? */
		return TCP_INIT_CWND;  /* be safe: cap at default initial cwnd*/

	w = (u64)bw * bbr->min_rtt_us;

	/* Apply a gain to the given value, remove the BW_SCALE shift, and
	 * round the value up to avoid a negative feedback loop.
	 */
	bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;

	return bdp;
}
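
/* Worked example for bbr_bdp(), with assumed inputs: bw = 2 pkts/usec
 * (stored as 2 << 24), min_rtt_us = 5000 (5 ms), and gain = BBR_UNIT (1.0).
 * Then w = 2^25 * 5000, the gain multiply and >> BBR_SCALE cancel, and
 * bdp = ceil(2^25 * 5000 / 2^24) = 10000 packets, i.e. exactly
 * bw * min_rtt = 2 pkts/usec * 5000 usec.
 */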

/* To achieve full performance in high-speed paths, we budget enough cwnd to
 * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
 *   - one skb in sending host Qdisc,
 *   - one skb in sending host TSO/GSO engine
 *   - one skb being received by receiver host LRO/GRO/delayed-ACK engine
 * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
 * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
 * which allows 2 outstanding 2-packet sequences, to try to keep pipe
 * full even with ACK-every-other-packet delayed ACKs.
 */
static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd)
{
	struct bbr *bbr = inet_csk_ca(sk);

	/* Allow enough full-sized skbs in flight to utilize end systems. */
	cwnd += 3 * bbr_tso_segs_goal(sk);

	/* Reduce delayed ACKs by rounding up cwnd to the next even number. */
	cwnd = (cwnd + 1) & ~1U;

	/* Ensure gain cycling gets inflight above BDP even for small BDPs. */
	if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == 0)
		cwnd += 2;

	return cwnd;
}
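
/* Worked example for the budget above, with an assumed BDP of 5 packets and
 * bbr_tso_segs_goal() = 2: cwnd = 5 + 3 * 2 = 11, rounded up to the even
 * value 12; in the PROBE_BW probing phase (cycle_idx == 0) two more packets
 * are added, for 14. The extra headroom keeps the end hosts busy even when
 * the path's BDP itself is tiny.
 */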

/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
{
	u32 inflight;

	inflight = bbr_bdp(sk, bw, gain);
	inflight = bbr_quantization_budget(sk, inflight);

	return inflight;
}

/* With pacing at lower layers, there's often less data "in the network" than
 * "in flight". With TSQ and departure time pacing at lower layers (e.g. fq),
 * we often have several skbs queued in the pacing layer with a pre-scheduled
 * earliest departure time (EDT). BBR adapts its pacing rate based on the
 * inflight level that it estimates has already been "baked in" by previous
 * departure time decisions. We calculate a rough estimate of the number of our
 * packets that might be in the network at the earliest departure time for the
 * next skb scheduled:
 *   in_network_at_edt = inflight_at_edt - (EDT - now) * bw
 * If we're increasing inflight, then we want to know if the transmit of the
 * EDT skb will push inflight above the target, so inflight_at_edt includes
 * bbr_tso_segs_goal() from the skb departing at EDT. If decreasing inflight,
 * then estimate if inflight will sink too low just before the EDT transmit.
 */
static u32 bbr_packets_in_net_at_edt(struct sock *sk, u32 inflight_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u64 now_ns, edt_ns, interval_us;
	u32 interval_delivered, inflight_at_edt;

	now_ns = tp->tcp_clock_cache;
	edt_ns = max(tp->tcp_wstamp_ns, now_ns);
	interval_us = div_u64(edt_ns - now_ns, NSEC_PER_USEC);
	interval_delivered = (u64)bbr_bw(sk) * interval_us >> BW_SCALE;
	inflight_at_edt = inflight_now;
	if (bbr->pacing_gain > BBR_UNIT)              /* increasing inflight */
		inflight_at_edt += bbr_tso_segs_goal(sk);  /* include EDT skb */
	if (interval_delivered >= inflight_at_edt)
		return 0;
	return inflight_at_edt - interval_delivered;
}
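
/* Worked example for the estimate above, with assumed values and
 * pacing_gain <= 1.0 (so no extra EDT skb is added): the next skb's earliest
 * departure time is 2000 usec in the future, bbr_bw() is 0.01 pkts/usec
 * (stored as 0.01 << 24), and 40 packets are in flight. Then
 * interval_delivered = 0.01 * 2000 = 20 packets are expected to leave the
 * network before the EDT transmit, so only 40 - 20 = 20 of our packets are
 * estimated to still be "in the network" at that point.
 */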

/* Find the cwnd increment based on estimate of ack aggregation */
static u32 bbr_ack_aggregation_cwnd(struct sock *sk)
{
	u32 max_aggr_cwnd, aggr_cwnd = 0;

	if (bbr_extra_acked_gain && bbr_full_bw_reached(sk)) {
		max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us)
				/ BW_UNIT;
		aggr_cwnd = (bbr_extra_acked_gain * bbr_extra_acked(sk))
			     >> BBR_SCALE;
		aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd);
	}

	return aggr_cwnd;
}

/* An optimization in BBR to reduce losses: On the first round of recovery, we
 * follow the packet conservation principle: send P packets per P packets acked.
 * After that, we slow-start and send at most 2*P packets per P packets acked.
 * After recovery finishes, or upon undo, we restore the cwnd we had when
 * recovery started (capped by the target cwnd based on estimated BDP).
 *
 * TODO(ycheng/ncardwell): implement a rate-based approach.
 */
static bool bbr_set_cwnd_to_recover_or_restore(
	struct sock *sk, const struct rate_sample *rs, u32 acked, u32 *new_cwnd)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
	u32 cwnd = tp->snd_cwnd;

	/* An ACK for P pkts should release at most 2*P packets. We do this
	 * in two steps. First, here we deduct the number of lost packets.
	 * Then, in bbr_set_cwnd() we slow start up toward the target cwnd.
	 */
	if (rs->losses > 0)
		cwnd = max_t(s32, cwnd - rs->losses, 1);

	if (state == TCP_CA_Recovery && prev_state != TCP_CA_Recovery) {
		/* Starting 1st round of Recovery, so do packet conservation. */
		bbr->packet_conservation = 1;
		bbr->next_rtt_delivered = tp->delivered;  /* start round now */
		/* Cut unused cwnd from app behavior, TSQ, or TSO deferral: */
		cwnd = tcp_packets_in_flight(tp) + acked;
	} else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
		/* Exiting loss recovery; restore cwnd saved before recovery. */
		cwnd = max(cwnd, bbr->prior_cwnd);
		bbr->packet_conservation = 0;
	}
	bbr->prev_ca_state = state;

	if (bbr->packet_conservation) {
		*new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
		return true;	/* yes, using packet conservation */
	}
	*new_cwnd = cwnd;
	return false;
}
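
/* Worked example for the recovery logic above, with assumed numbers: on the
 * ACK that enters Recovery, suppose cwnd was 100, the ACK reports 2 packets
 * lost and 3 packets newly (S)ACKed, and 60 packets remain in flight. The
 * lost packets cut cwnd to 98, and packet conservation then sets cwnd to
 * packets_in_flight + acked = 63, so during the first round of recovery each
 * ACK releases only as much new data as it removed from the network.
 */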

/* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
 * has drawn us down below target), or snap down to target if we're above it.
 */
static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
			 u32 acked, u32 bw, int gain)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	u32 cwnd = tp->snd_cwnd, target_cwnd = 0;

	if (!acked)
		goto done;  /* no packet fully ACKed; just apply caps */

	if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
		goto done;

	target_cwnd = bbr_bdp(sk, bw, gain);

	/* Increment the cwnd to account for excess ACKed data that seems
	 * due to aggregation (of data and/or ACKs) visible in the ACK stream.
	 */
	target_cwnd += bbr_ack_aggregation_cwnd(sk);
	target_cwnd = bbr_quantization_budget(sk, target_cwnd);

	/* If we're below target cwnd, slow start cwnd toward target cwnd. */
	if (bbr_full_bw_reached(sk))  /* only cut cwnd if we filled the pipe */
		cwnd = min(cwnd + acked, target_cwnd);
	else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
		cwnd = cwnd + acked;
	cwnd = max(cwnd, bbr_cwnd_min_target);

done:
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);	/* apply global cap */
	if (bbr->mode == BBR_PROBE_RTT)  /* drain queue, refresh min_rtt */
		tp->snd_cwnd = min(tp->snd_cwnd, bbr_cwnd_min_target);
}

/* End cycle phase if it's time and/or we hit the phase's in-flight target. */
static bool bbr_is_next_cycle_phase(struct sock *sk,
				    const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bbr *bbr = inet_csk_ca(sk);
	bool is_full_length =
		tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) >
		bbr->min_rtt_us;
	u32 inflight, bw;

	/* The pacing_gain of 1.0 paces at the estimated bw to try to fully
	 * use the pipe without increasing the queue.
	 */
	if (bbr->pacing_gain == BBR_UNIT)
		return is_full_length;		/* just use wall clock time */

	inflight = bbr_packets_in_net_at_edt(sk, rs->prior_in_flight);
	bw = bbr_max_bw(sk);

	/* A pacing_gain > 1.0 probes for bw by trying to raise inflight to at
	 * least pacing_gain*BDP; this may take more than min_rtt if min_rtt is
	 * small (e.g. on a LAN). We do not persist if packets are lost, since
	 * a path with small buffers may not hold that much.
	 */
	if (bbr->pacing_gain > BBR_UNIT)
		return is_full_length &&
			(rs->losses ||  /* perhaps pacing_gain*BDP won't fit */
			 inflight >= bbr_inflight(sk, bw, bbr->pacing_gain));

	/* A pacing_gain < 1.0 tries to drain extra queue we added if bw
	 * probing didn't find more bw. If inflight falls to match BDP then we
	 * estimate queue is drained; persisting would underutilize the pipe.
	 */
	return is_full_length ||
		inflight <= bbr_inflight(sk, bw, BBR_UNIT);
}
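
/* Worked example for the phase-exit rules above, with assumed values: in the
 * 1.25 probing phase with an estimated BDP of 100 packets, the phase ends
 * only once a full min_rtt has elapsed AND either a loss was seen or inflight
 * has reached roughly 1.25 * 100 = 125 packets (plus the quantization
 * budget); in the 0.75 drain phase it ends as soon as EITHER a min_rtt has
 * elapsed or inflight has fallen back to roughly the 100-packet BDP.
 */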
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) static void bbr_advance_cycle_phase(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) struct tcp_sock *tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) bbr->cycle_mstamp = tp->delivered_mstamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) /* Gain cycling: cycle pacing gain to converge to fair share of available bw. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) static void bbr_update_cycle_phase(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) const struct rate_sample *rs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) if (bbr->mode == BBR_PROBE_BW && bbr_is_next_cycle_phase(sk, rs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) bbr_advance_cycle_phase(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) static void bbr_reset_startup_mode(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) bbr->mode = BBR_STARTUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) static void bbr_reset_probe_bw_mode(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) bbr->mode = BBR_PROBE_BW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) bbr->cycle_idx = CYCLE_LEN - 1 - prandom_u32_max(bbr_cycle_rand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) bbr_advance_cycle_phase(sk); /* flip to next phase of gain cycle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) static void bbr_reset_mode(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) if (!bbr_full_bw_reached(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) bbr_reset_startup_mode(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) bbr_reset_probe_bw_mode(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) /* Start a new long-term sampling interval. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) struct tcp_sock *tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) bbr->lt_last_delivered = tp->delivered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) bbr->lt_last_lost = tp->lost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) bbr->lt_rtt_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) /* Completely reset long-term bandwidth sampling. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) static void bbr_reset_lt_bw_sampling(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) bbr->lt_bw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) bbr->lt_use_bw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) bbr->lt_is_sampling = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) bbr_reset_lt_bw_sampling_interval(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) /* Long-term bw sampling interval is done. Estimate whether we're policed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) static void bbr_lt_bw_interval_done(struct sock *sk, u32 bw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) u32 diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) if (bbr->lt_bw) { /* do we have bw from a previous interval? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) /* Is new bw close to the lt_bw from the previous interval? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) diff = abs(bw - bbr->lt_bw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) if ((diff * BBR_UNIT <= bbr_lt_bw_ratio * bbr->lt_bw) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) (bbr_rate_bytes_per_sec(sk, diff, BBR_UNIT) <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) bbr_lt_bw_diff)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) /* All criteria are met; estimate we're policed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) bbr->lt_bw = (bw + bbr->lt_bw) >> 1; /* avg 2 intvls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) bbr->lt_use_bw = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) bbr->pacing_gain = BBR_UNIT; /* try to avoid drops */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) bbr->lt_rtt_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) bbr->lt_bw = bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) bbr_reset_lt_bw_sampling_interval(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) /* Token-bucket traffic policers are common (see "An Internet-Wide Analysis of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) * Traffic Policing", SIGCOMM 2016). BBR detects token-bucket policers and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) * explicitly models their policed rate, to reduce unnecessary losses. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) * estimate that we're policed if we see 2 consecutive sampling intervals with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * consistent throughput and high packet loss. If we think we're being policed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * set lt_bw to the "long-term" average delivery rate from those 2 intervals.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) struct tcp_sock *tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) u32 lost, delivered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) u64 bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) u32 t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) if (bbr->lt_use_bw) { /* already using long-term rate, lt_bw? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) ++bbr->lt_rtt_cnt >= bbr_lt_bw_max_rtts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) bbr_reset_lt_bw_sampling(sk); /* stop using lt_bw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) bbr_reset_probe_bw_mode(sk); /* restart gain cycling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
	/* Wait for the first loss before sampling, to let the policer exhaust
	 * its tokens and estimate the steady-state rate allowed by the policer.
	 * Starting to sample earlier would include bursts that over-estimate
	 * the bw.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) if (!bbr->lt_is_sampling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) if (!rs->losses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) bbr_reset_lt_bw_sampling_interval(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) bbr->lt_is_sampling = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) /* To avoid underestimates, reset sampling if we run out of data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) if (rs->is_app_limited) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) bbr_reset_lt_bw_sampling(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) if (bbr->round_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) bbr->lt_rtt_cnt++; /* count round trips in this interval */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) if (bbr->lt_rtt_cnt < bbr_lt_intvl_min_rtts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) return; /* sampling interval needs to be longer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) if (bbr->lt_rtt_cnt > 4 * bbr_lt_intvl_min_rtts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) bbr_reset_lt_bw_sampling(sk); /* interval is too long */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) /* End sampling interval when a packet is lost, so we estimate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) * policer tokens were exhausted. Stopping the sampling before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) * tokens are exhausted under-estimates the policed rate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) if (!rs->losses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) /* Calculate packets lost and delivered in sampling interval. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) lost = tp->lost - bbr->lt_last_lost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) delivered = tp->delivered - bbr->lt_last_delivered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) /* Is loss rate (lost/delivered) >= lt_loss_thresh? If not, wait. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) if (!delivered || (lost << BBR_SCALE) < bbr_lt_loss_thresh * delivered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) /* Find average delivery rate in this sampling interval. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) if ((s32)t < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) return; /* interval is less than one ms, so wait */
	/* Check if we can multiply without overflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (t >= ~0U / USEC_PER_MSEC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) bbr_reset_lt_bw_sampling(sk); /* interval too long; reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) t *= USEC_PER_MSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) bw = (u64)delivered * BW_UNIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) do_div(bw, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) bbr_lt_bw_interval_done(sk, bw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) /* Estimate the bandwidth based on how fast packets are delivered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) struct tcp_sock *tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) u64 bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) bbr->round_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) if (rs->delivered < 0 || rs->interval_us <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) return; /* Not a valid observation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) /* See if we've reached the next RTT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (!before(rs->prior_delivered, bbr->next_rtt_delivered)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) bbr->next_rtt_delivered = tp->delivered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) bbr->rtt_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) bbr->round_start = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) bbr->packet_conservation = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) bbr_lt_bw_sampling(sk, rs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
	/* Divide delivered by the interval to find a (lower bound) bottleneck
	 * bandwidth sample. Delivered is in packets and interval_us is in
	 * microseconds, so the ratio will be << 1 for most connections; hence
	 * delivered is scaled up first.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) /* If this sample is application-limited, it is likely to have a very
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * low delivered count that represents application behavior rather than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * the available network rate. Such a sample could drag down estimated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) * bw, causing needless slow-down. Thus, to continue to send at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * last measured network rate, we filter out app-limited samples unless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * they describe the path bw at least as well as our bw model.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) *
	 * So the goal during an app-limited phase is to proceed with the best
	 * measured network rate, no matter how long that phase lasts. We
	 * automatically leave this phase when the app writes faster than the
	 * network can deliver :)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (!rs->is_app_limited || bw >= bbr_max_bw(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) /* Incorporate new sample into our max bw filter. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) minmax_running_max(&bbr->bw, bbr_bw_rtts, bbr->rtt_cnt, bw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
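
/* Worked example of the scaling in bbr_update_bw() (a sketch; BW_UNIT is
 * defined earlier in this file as 1 << BW_SCALE, with BW_SCALE = 24
 * upstream): a sample with rs->delivered = 10 packets over
 * rs->interval_us = 10000 is a raw rate of 0.001 pkt/us (1000 pkt/sec),
 * which would truncate to 0 in integer math. Scaling first gives
 *	bw = 10 * (1 << 24) / 10000 ~= 16777,
 * i.e. 0.001 pkt/us in fixed point, which the max filter and the pacing
 * rate conversion can then use without losing precision.
 */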
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) /* Estimates the windowed max degree of ack aggregation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * This is used to provision extra in-flight data to keep sending during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * inter-ACK silences.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * Degree of ack aggregation is estimated as extra data acked beyond expected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) * cwnd += max_extra_acked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * Max filter is an approximate sliding window of 5-10 (packet timed) round
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * trips.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) static void bbr_update_ack_aggregation(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) const struct rate_sample *rs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) u32 epoch_us, expected_acked, extra_acked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) struct tcp_sock *tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (!bbr_extra_acked_gain || rs->acked_sacked <= 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) rs->delivered < 0 || rs->interval_us <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (bbr->round_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) bbr->extra_acked_win_rtts = min(0x1F,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) bbr->extra_acked_win_rtts + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (bbr->extra_acked_win_rtts >= bbr_extra_acked_win_rtts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) bbr->extra_acked_win_rtts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) bbr->extra_acked[bbr->extra_acked_win_idx] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /* Compute how many packets we expected to be delivered over epoch. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) bbr->ack_epoch_mstamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
	/* Reset the aggregation epoch if the ACK rate is below the expected
	 * rate, or if a significantly large number of ACKs has been received
	 * since the epoch began (so the epoch is potentially quite old).
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (bbr->ack_epoch_acked <= expected_acked ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) (bbr->ack_epoch_acked + rs->acked_sacked >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) bbr_ack_epoch_acked_reset_thresh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) bbr->ack_epoch_acked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) bbr->ack_epoch_mstamp = tp->delivered_mstamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) expected_acked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) /* Compute excess data delivered, beyond what was expected. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) bbr->ack_epoch_acked + rs->acked_sacked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) extra_acked = bbr->ack_epoch_acked - expected_acked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) extra_acked = min(extra_acked, tp->snd_cwnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
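
/* Worked example of the extra_acked estimate (a sketch with made-up
 * numbers): with an estimated bbr_bw() of 1000 pkt/sec, an aggregation
 * epoch of 20 ms "expects" expected_acked ~= 20 packets. If an aggregating
 * link (e.g. wifi or cellular) then delivers a burst of ACKs covering 60
 * packets within that epoch, extra_acked ~= 40 (clamped to cwnd). The
 * two-window max filter remembers that value for roughly 5-10 packet-timed
 * round trips (see the comment above), and the cwnd provisioning code
 * earlier in this file adds it on top of the BDP-based cwnd so sending can
 * continue through the next ACK silence.
 */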
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /* Estimate when the pipe is full, using the change in delivery rate: BBR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
 * rounds. Why 3 rounds: (1) rwin autotuning grows the rwin; (2) we fill the
 * higher rwin; (3) we get higher delivery rate samples. Alternatively, any
 * transient cross-traffic or radio noise can go away. CUBIC Hystart shares a
 * similar design goal, but uses delay and inter-ACK spacing instead of
 * bandwidth.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) static void bbr_check_full_bw_reached(struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) const struct rate_sample *rs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) u32 bw_thresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) bw_thresh = (u64)bbr->full_bw * bbr_full_bw_thresh >> BBR_SCALE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (bbr_max_bw(sk) >= bw_thresh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) bbr->full_bw = bbr_max_bw(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) bbr->full_bw_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) ++bbr->full_bw_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
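
/* Worked example (a sketch; bbr_full_bw_thresh is BBR_UNIT * 5 / 4
 * upstream, i.e. 25% growth): suppose full_bw = 80 units at the start of a
 * round, so the threshold is 80 * 5/4 = 100. If the windowed max bw reaches
 * 100 or more, we record the new plateau candidate and reset full_bw_cnt.
 * If three consecutive non-app-limited rounds fail to clear the threshold,
 * full_bw_reached is latched and STARTUP hands off to DRAIN (see
 * bbr_check_drain() below).
 */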
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) /* If pipe is probably full, drain the queue and then enter steady-state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) bbr->mode = BBR_DRAIN; /* drain queue we created */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) tcp_sk(sk)->snd_ssthresh =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) } /* fall through to check if in-flight is already small: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (bbr->mode == BBR_DRAIN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
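
/* A sketch of the DRAIN exit target used above: bbr_inflight(sk, bw,
 * BBR_UNIT), defined earlier in this file, is roughly the estimated BDP,
 * i.e. bw * min_rtt. For example, with max bw ~= 1000 pkt/sec and
 * min_rtt ~= 50 ms the target is ~50 packets, so DRAIN keeps pacing below
 * the bottleneck rate until the EDT-adjusted in-flight count falls to ~50,
 * at which point the queue built up during STARTUP is presumed drained and
 * we enter PROBE_BW.
 */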
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) static void bbr_check_probe_rtt_done(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct tcp_sock *tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (!(bbr->probe_rtt_done_stamp &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) bbr->min_rtt_stamp = tcp_jiffies32; /* wait a while until PROBE_RTT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) bbr_reset_mode(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * periodically drain the bottleneck queue, to converge to measure the true
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * min_rtt (unloaded propagation delay). This allows the flows to keep queues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * small (reducing queuing delay and packet loss) and achieve fairness among
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * BBR flows.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * The min_rtt filter window is 10 seconds. When the min_rtt estimate expires,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * After at least bbr_probe_rtt_mode_ms=200ms and at least one packet-timed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * round trip elapsed with that flight size <= 4, we leave PROBE_RTT mode and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * re-enter the previous mode. BBR uses 200ms to approximately bound the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) *
 * Note that a flow only pays the 2% penalty if it has been busy sending over
 * the entire last 10 seconds. Interactive applications (e.g., Web, RPCs,
 * video chunks) often have natural silences or low-rate periods within 10
 * seconds where the rate is low enough for long enough to drain the flow's
 * queue at the bottleneck. We pick up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * these min RTT measurements opportunistically with our min_rtt filter. :-)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct tcp_sock *tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) bool filter_expired;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /* Track min RTT seen in the min_rtt_win_sec filter window: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) filter_expired = after(tcp_jiffies32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (rs->rtt_us >= 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) (rs->rtt_us < bbr->min_rtt_us ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) (filter_expired && !rs->is_ack_delayed))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) bbr->min_rtt_us = rs->rtt_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) bbr->min_rtt_stamp = tcp_jiffies32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (bbr_probe_rtt_mode_ms > 0 && filter_expired &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) bbr->mode = BBR_PROBE_RTT; /* dip, drain queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) bbr_save_cwnd(sk); /* note cwnd so we can restore it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) bbr->probe_rtt_done_stamp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (bbr->mode == BBR_PROBE_RTT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /* Ignore low rate samples during this mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) tp->app_limited =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) (tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) /* Maintain min packets in flight for max(200 ms, 1 round). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (!bbr->probe_rtt_done_stamp &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) tcp_packets_in_flight(tp) <= bbr_cwnd_min_target) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) bbr->probe_rtt_done_stamp = tcp_jiffies32 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) msecs_to_jiffies(bbr_probe_rtt_mode_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) bbr->probe_rtt_round_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) bbr->next_rtt_delivered = tp->delivered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) } else if (bbr->probe_rtt_done_stamp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (bbr->round_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) bbr->probe_rtt_round_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (bbr->probe_rtt_round_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) bbr_check_probe_rtt_done(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /* Restart after idle ends only once we process a new S/ACK for data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (rs->delivered > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) bbr->idle_restart = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
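
/* PROBE_RTT timeline (a sketch of the logic above): once in-flight first
 * drops to bbr_cwnd_min_target or below, probe_rtt_done_stamp is armed for
 * now + bbr_probe_rtt_mode_ms. Exiting then requires both (a) that stamp to
 * expire and (b) at least one full packet-timed round at the small cwnd
 * (probe_rtt_round_done), after which bbr_check_probe_rtt_done() restores
 * the prior cwnd, refreshes min_rtt_stamp, and returns to STARTUP or
 * PROBE_BW via bbr_reset_mode().
 */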
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) static void bbr_update_gains(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) switch (bbr->mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) case BBR_STARTUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) bbr->pacing_gain = bbr_high_gain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) bbr->cwnd_gain = bbr_high_gain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) case BBR_DRAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) bbr->pacing_gain = bbr_drain_gain; /* slow, to drain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) bbr->cwnd_gain = bbr_high_gain; /* keep cwnd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) case BBR_PROBE_BW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) bbr->pacing_gain = (bbr->lt_use_bw ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) BBR_UNIT :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) bbr_pacing_gain[bbr->cycle_idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) bbr->cwnd_gain = bbr_cwnd_gain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) case BBR_PROBE_RTT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) bbr->pacing_gain = BBR_UNIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) bbr->cwnd_gain = BBR_UNIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) WARN_ONCE(1, "BBR bad mode: %u\n", bbr->mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) bbr_update_bw(sk, rs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) bbr_update_ack_aggregation(sk, rs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) bbr_update_cycle_phase(sk, rs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) bbr_check_full_bw_reached(sk, rs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) bbr_check_drain(sk, rs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) bbr_update_min_rtt(sk, rs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) bbr_update_gains(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) static void bbr_main(struct sock *sk, const struct rate_sample *rs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) u32 bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) bbr_update_model(sk, rs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) bw = bbr_bw(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static void bbr_init(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) struct tcp_sock *tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) bbr->prior_cwnd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) bbr->rtt_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) bbr->next_rtt_delivered = tp->delivered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) bbr->prev_ca_state = TCP_CA_Open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) bbr->packet_conservation = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) bbr->probe_rtt_done_stamp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) bbr->probe_rtt_round_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) bbr->min_rtt_us = tcp_min_rtt(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) bbr->min_rtt_stamp = tcp_jiffies32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) minmax_reset(&bbr->bw, bbr->rtt_cnt, 0); /* init max bw to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) bbr->has_seen_rtt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) bbr_init_pacing_rate_from_rtt(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) bbr->round_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) bbr->idle_restart = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) bbr->full_bw_reached = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) bbr->full_bw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) bbr->full_bw_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) bbr->cycle_mstamp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) bbr->cycle_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) bbr_reset_lt_bw_sampling(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) bbr_reset_startup_mode(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) bbr->ack_epoch_mstamp = tp->tcp_mstamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) bbr->ack_epoch_acked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) bbr->extra_acked_win_rtts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) bbr->extra_acked_win_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) bbr->extra_acked[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) bbr->extra_acked[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) static u32 bbr_sndbuf_expand(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) /* Provision 3 * cwnd since BBR may slow-start even during recovery. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) return 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) /* In theory BBR does not need to undo the cwnd since it does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * always reduce cwnd on losses (see bbr_main()). Keep it for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static u32 bbr_undo_cwnd(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) bbr->full_bw = 0; /* spurious slow-down; reset full pipe detection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) bbr->full_bw_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) bbr_reset_lt_bw_sampling(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return tcp_sk(sk)->snd_cwnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) /* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) static u32 bbr_ssthresh(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) bbr_save_cwnd(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return tcp_sk(sk)->snd_ssthresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) union tcp_cc_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (ext & (1 << (INET_DIAG_BBRINFO - 1)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) struct tcp_sock *tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) u64 bw = bbr_bw(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) bw = bw * tp->mss_cache * USEC_PER_SEC >> BW_SCALE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) memset(&info->bbr, 0, sizeof(info->bbr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) info->bbr.bbr_bw_lo = (u32)bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) info->bbr.bbr_bw_hi = (u32)(bw >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) info->bbr.bbr_min_rtt = bbr->min_rtt_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) info->bbr.bbr_pacing_gain = bbr->pacing_gain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) info->bbr.bbr_cwnd_gain = bbr->cwnd_gain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) *attr = INET_DIAG_BBRINFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return sizeof(info->bbr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
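
/* The fields filled in above surface to user space via inet_diag (e.g.
 * "ss -ti" prints a "bbr:(bw:...,mrtt:...)" block for BBR sockets) and via
 * the TCP_CC_INFO socket option. A minimal user-space sketch, not part of
 * this module (assumes <netinet/tcp.h> for TCP_CC_INFO and
 * <linux/inet_diag.h> for union tcp_cc_info):
 *
 *	union tcp_cc_info info;
 *	socklen_t len = sizeof(info);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_CC_INFO, &info, &len) == 0) {
 *		__u64 bw = ((__u64)info.bbr.bbr_bw_hi << 32) |
 *			   info.bbr.bbr_bw_lo;
 *		printf("bw=%llu bytes/sec min_rtt=%u us\n",
 *		       (unsigned long long)bw, info.bbr.bbr_min_rtt);
 *	}
 */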
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static void bbr_set_state(struct sock *sk, u8 new_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) struct bbr *bbr = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) if (new_state == TCP_CA_Loss) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) struct rate_sample rs = { .losses = 1 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) bbr->prev_ca_state = TCP_CA_Loss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) bbr->full_bw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) bbr->round_start = 1; /* treat RTO like end of a round */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) bbr_lt_bw_sampling(sk, &rs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) .flags = TCP_CONG_NON_RESTRICTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) .name = "bbr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) .init = bbr_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) .cong_control = bbr_main,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) .sndbuf_expand = bbr_sndbuf_expand,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) .undo_cwnd = bbr_undo_cwnd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) .cwnd_event = bbr_cwnd_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) .ssthresh = bbr_ssthresh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) .min_tso_segs = bbr_min_tso_segs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) .get_info = bbr_get_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) .set_state = bbr_set_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) };
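
/* Once this module is built in or loaded (e.g. "modprobe tcp_bbr"), BBR can
 * be selected system-wide with
 *	sysctl net.ipv4.tcp_congestion_control=bbr
 * or per socket from user space, e.g. (a sketch):
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "bbr", 3);
 * Because BBR is rate-paced, it relies on either the fq qdisc or the
 * internal TCP pacing requested via sk_pacing_status in bbr_init() above.
 */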
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) static int __init bbr_register(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) return tcp_register_congestion_control(&tcp_bbr_cong_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) static void __exit bbr_unregister(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) tcp_unregister_congestion_control(&tcp_bbr_cong_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) module_init(bbr_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) module_exit(bbr_unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) MODULE_AUTHOR("Van Jacobson <vanj@google.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) MODULE_AUTHOR("Neal Cardwell <ncardwell@google.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) MODULE_AUTHOR("Yuchung Cheng <ycheng@google.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) MODULE_AUTHOR("Soheil Hassas Yeganeh <soheil@google.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) MODULE_LICENSE("Dual BSD/GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)");