// SPDX-License-Identifier: GPL-2.0-only
/*
 * TCP Illinois congestion control.
 * Home page:
 *	http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
 *
 * The algorithm is described in:
 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
 *  for High-Speed Networks"
 * http://tamerbasar.csl.illinois.edu/LiuBasarSrikantPerfEvalArtJun2008.pdf
 *
 * Implemented from description in paper and ns-2 simulation.
 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
 */

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <asm/div64.h>
#include <net/tcp.h>

#define ALPHA_SHIFT	7
#define ALPHA_SCALE	(1u<<ALPHA_SHIFT)
#define ALPHA_MIN	((3*ALPHA_SCALE)/10)	/* ~0.3 */
#define ALPHA_MAX	(10*ALPHA_SCALE)	/* 10.0 */
#define ALPHA_BASE	ALPHA_SCALE		/* 1.0 */
#define RTT_MAX		(U32_MAX / ALPHA_MAX)	/* 3.3 secs */

#define BETA_SHIFT	6
#define BETA_SCALE	(1u<<BETA_SHIFT)
#define BETA_MIN	(BETA_SCALE/8)		/* 0.125 */
#define BETA_MAX	(BETA_SCALE/2)		/* 0.5 */
#define BETA_BASE	BETA_MAX
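
/*
 * Note on scaling (illustrative): alpha and beta are kept in fixed point,
 * with ALPHA_SCALE (128) and BETA_SCALE (64) each representing 1.0.  For
 * example, a beta of BETA_SCALE/2 halves the window on loss, the same
 * backoff as Reno, while an alpha of 10*ALPHA_SCALE grows the window by
 * roughly ten packets per RTT during congestion avoidance.
 */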

static int win_thresh __read_mostly = 15;
module_param(win_thresh, int, 0);
MODULE_PARM_DESC(win_thresh, "Window threshold for starting adaptive sizing");

static int theta __read_mostly = 5;
module_param(theta, int, 0);
MODULE_PARM_DESC(theta, "# of fast RTTs before full growth");

/* TCP Illinois Parameters */
struct illinois {
	u64	sum_rtt;	/* sum of rtts measured within last rtt */
	u16	cnt_rtt;	/* # of rtts measured within last rtt */
	u32	base_rtt;	/* min of all rtt in usec */
	u32	max_rtt;	/* max of all rtt in usec */
	u32	end_seq;	/* right edge of current RTT */
	u32	alpha;		/* Additive increase */
	u32	beta;		/* Multiplicative decrease */
	u16	acked;		/* # packets acked by current ACK */
	u8	rtt_above;	/* average rtt has gone above threshold */
	u8	rtt_low;	/* # of rtt measurements below threshold */
};
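
/*
 * Note: sum_rtt and cnt_rtt only cover the samples collected since the
 * last rtt_reset(), i.e. the current RTT, whereas base_rtt and max_rtt
 * persist for the life of the connection (base_rtt is only reset in
 * tcp_illinois_init()).
 */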

static void rtt_reset(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	ca->end_seq = tp->snd_nxt;
	ca->cnt_rtt = 0;
	ca->sum_rtt = 0;

	/* TODO: age max_rtt? */
}

static void tcp_illinois_init(struct sock *sk)
{
	struct illinois *ca = inet_csk_ca(sk);

	ca->alpha = ALPHA_MAX;
	ca->beta = BETA_BASE;
	ca->base_rtt = 0x7fffffff;
	ca->max_rtt = 0;

	ca->acked = 0;
	ca->rtt_low = 0;
	ca->rtt_above = 0;

	rtt_reset(sk);
}

/* Measure RTT for each ack. */
static void tcp_illinois_acked(struct sock *sk, const struct ack_sample *sample)
{
	struct illinois *ca = inet_csk_ca(sk);
	s32 rtt_us = sample->rtt_us;

	ca->acked = sample->pkts_acked;

	/* dup ack, no rtt sample */
	if (rtt_us < 0)
		return;

	/* ignore bogus values, this prevents wraparound in alpha math */
	if (rtt_us > RTT_MAX)
		rtt_us = RTT_MAX;

	/* keep track of minimum RTT seen so far */
	if (ca->base_rtt > rtt_us)
		ca->base_rtt = rtt_us;

	/* and max */
	if (ca->max_rtt < rtt_us)
		ca->max_rtt = rtt_us;

	++ca->cnt_rtt;
	ca->sum_rtt += rtt_us;
}

/* Maximum queuing delay */
static inline u32 max_delay(const struct illinois *ca)
{
	return ca->max_rtt - ca->base_rtt;
}

/* Average queuing delay */
static inline u32 avg_delay(const struct illinois *ca)
{
	u64 t = ca->sum_rtt;

	do_div(t, ca->cnt_rtt);
	return t - ca->base_rtt;
}

/*
 * Compute value of alpha used for additive increase.
 * If small window then use 1.0, equivalent to Reno.
 *
 * For larger windows, adjust based on average delay.
 * A. If average delay is at minimum (we are uncongested),
 *    then use large alpha (10.0) to increase faster.
 * B. If average delay is at maximum (getting congested),
 *    then use small alpha (0.3).
 *
 * The result is a concave window growth curve.
 */
static u32 alpha(struct illinois *ca, u32 da, u32 dm)
{
	u32 d1 = dm / 100;	/* Low threshold */

	if (da <= d1) {
		/* If never got out of low delay zone, then use max */
		if (!ca->rtt_above)
			return ALPHA_MAX;

		/* Wait for theta (default 5) good RTTs before allowing alpha
		 * to go to alpha max.  This prevents one good RTT from causing
		 * a sudden window increase.
		 */
		if (++ca->rtt_low < theta)
			return ca->alpha;

		ca->rtt_low = 0;
		ca->rtt_above = 0;
		return ALPHA_MAX;
	}

	ca->rtt_above = 1;

	/*
	 * Based on:
	 *
	 *      (dm - d1) amin amax
	 * k1 = -------------------
	 *         amax - amin
	 *
	 *       (dm - d1) amin
	 * k2 = ---------------- - d1
	 *        amax - amin
	 *
	 *             k1
	 * alpha = ----------
	 *          k2 + da
	 */
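	/*
	 * With dm and da shifted down by d1 (as done just below), this
	 * simplifies to alpha = dm * amax / (dm + da * (amax - amin) / amin),
	 * which is what the return statement computes: da == 0 (average delay
	 * at the low threshold) yields ALPHA_MAX and da == dm (average delay
	 * at the maximum) yields ALPHA_MIN.
	 */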

	dm -= d1;
	da -= d1;
	return (dm * ALPHA_MAX) /
		(dm + (da * (ALPHA_MAX - ALPHA_MIN)) / ALPHA_MIN);
}

/*
 * Beta used for multiplicative decrease.
 * For small window sizes, returns the same value as Reno (0.5).
 *
 * If average delay is small (at most 10% of max delay) then beta = 1/8.
 * If average delay is large (at least 80% of max delay) then beta = 1/2.
 * In between, beta is a linear function of the average delay.
 */
static u32 beta(u32 da, u32 dm)
{
	u32 d2, d3;

	d2 = dm / 10;
	if (da <= d2)
		return BETA_MIN;

	d3 = (8 * dm) / 10;
	if (da >= d3 || d3 <= d2)
		return BETA_MAX;

	/*
	 * Based on:
	 *
	 *       bmin d3 - bmax d2
	 * k3 = -------------------
	 *            d3 - d2
	 *
	 *       bmax - bmin
	 * k4 = -------------
	 *         d3 - d2
	 *
	 * b = k3 + k4 da
	 */
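	/*
	 * This is a straight linear interpolation between the two
	 * thresholds: da == d2 gives BETA_MIN and da == d3 gives BETA_MAX.
	 */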
	return (BETA_MIN * d3 - BETA_MAX * d2 + (BETA_MAX - BETA_MIN) * da)
		/ (d3 - d2);
}

/* Update alpha and beta values once per RTT */
static void update_params(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	if (tp->snd_cwnd < win_thresh) {
		ca->alpha = ALPHA_BASE;
		ca->beta = BETA_BASE;
	} else if (ca->cnt_rtt > 0) {
		u32 dm = max_delay(ca);
		u32 da = avg_delay(ca);

		ca->alpha = alpha(ca, da, dm);
		ca->beta = beta(da, dm);
	}

	rtt_reset(sk);
}

/*
 * In case of loss, reset to default values
 */
static void tcp_illinois_state(struct sock *sk, u8 new_state)
{
	struct illinois *ca = inet_csk_ca(sk);

	if (new_state == TCP_CA_Loss) {
		ca->alpha = ALPHA_BASE;
		ca->beta = BETA_BASE;
		ca->rtt_low = 0;
		ca->rtt_above = 0;
		rtt_reset(sk);
	}
}

/*
 * Increase window in response to successful acknowledgment.
 */
static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	if (after(ack, ca->end_seq))
		update_params(sk);

	/* RFC 2861: only increase cwnd if it is fully utilized */
	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In slow start */
	if (tcp_in_slow_start(tp))
		tcp_slow_start(tp, acked);
	else {
		u32 delta;

		/* snd_cwnd_cnt is # of packets since last cwnd increment */
		tp->snd_cwnd_cnt += ca->acked;
		ca->acked = 1;

		/* This is a close approximation of:
		 *    tp->snd_cwnd += alpha/tp->snd_cwnd
		 */
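		/* Worked example (assuming one packet per ACK): with
		 * alpha == 10*ALPHA_SCALE and snd_cwnd == 100, delta grows by
		 * 10 per ACK, so every 10th ACK adds delta / snd_cwnd == 1 to
		 * the window, i.e. roughly alpha (10) packets per RTT.
		 */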
		delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
		if (delta >= tp->snd_cwnd) {
			tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd,
					   (u32)tp->snd_cwnd_clamp);
			tp->snd_cwnd_cnt = 0;
		}
	}
}

static u32 tcp_illinois_ssthresh(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	/* Multiplicative decrease */
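	/* E.g. beta == BETA_BASE (0.5) halves the window as Reno would,
	 * while beta == BETA_MIN (1/8) backs off by only 12.5% when the
	 * average queuing delay has stayed low.
	 */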
	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
}

/* Extract info for TCP socket info provided via netlink. */
static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
				union tcp_cc_info *info)
{
	const struct illinois *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		info->vegas.tcpv_enabled = 1;
		info->vegas.tcpv_rttcnt = ca->cnt_rtt;
		info->vegas.tcpv_minrtt = ca->base_rtt;
		info->vegas.tcpv_rtt = 0;

		if (info->vegas.tcpv_rttcnt > 0) {
			u64 t = ca->sum_rtt;

			do_div(t, info->vegas.tcpv_rttcnt);
			info->vegas.tcpv_rtt = t;
		}
		*attr = INET_DIAG_VEGASINFO;
		return sizeof(struct tcpvegas_info);
	}
	return 0;
}

static struct tcp_congestion_ops tcp_illinois __read_mostly = {
	.init		= tcp_illinois_init,
	.ssthresh	= tcp_illinois_ssthresh,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cong_avoid	= tcp_illinois_cong_avoid,
	.set_state	= tcp_illinois_state,
	.get_info	= tcp_illinois_info,
	.pkts_acked	= tcp_illinois_acked,

	.owner		= THIS_MODULE,
	.name		= "illinois",
};

static int __init tcp_illinois_register(void)
{
	BUILD_BUG_ON(sizeof(struct illinois) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_illinois);
}

static void __exit tcp_illinois_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_illinois);
}

module_init(tcp_illinois_register);
module_exit(tcp_illinois_unregister);

MODULE_AUTHOR("Stephen Hemminger, Shao Liu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Illinois");
MODULE_VERSION("1.0");