^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Binary Increase Congestion control for TCP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Home page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * This is from the implementation of BICTCP in
 * Lisong Xu, Khaled Harfoush, and Injong Rhee.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * "Binary Increase Congestion Control for Fast, Long Distance
 * Networks" in IEEE INFOCOM 2004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Available from:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * http://netsrv.csc.ncsu.edu/export/bitcp.pdf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * Unless BIC is enabled and congestion window is large
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * this behaves the same as the original Reno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <net/tcp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
#define BICTCP_BETA_SCALE    1024	/* Scale factor beta calculation
					 * max_cwnd = snd_cwnd * beta
					 */
#define BICTCP_B		4	 /*
					  * In binary search,
					  * go to point (max+min)/N
					  */

/* Module tunables; all are runtime-writable via sysfs (mode 0644). */
static int fast_convergence = 1;	/* on loss, remember a reduced Wmax */
static int max_increment = 16;		/* cap on cwnd growth per RTT (segments) */
static int low_window = 14;		/* at or below this cwnd, act like Reno */
static int beta = 819;	/* = 819/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh;		/* 0 = keep the kernel's default */
static int smooth_part = 20;		/* RTTs spent close to Wmax */

module_param(fast_convergence, int, 0644);
MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence");
module_param(max_increment, int, 0644);
MODULE_PARM_DESC(max_increment, "Limit on increment allowed during binary search");
module_param(low_window, int, 0644);
MODULE_PARM_DESC(low_window, "lower bound on congestion window (for TCP friendliness)");
module_param(beta, int, 0644);
MODULE_PARM_DESC(beta, "beta for multiplicative increase");
module_param(initial_ssthresh, int, 0644);
MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold");
module_param(smooth_part, int, 0644);
MODULE_PARM_DESC(smooth_part, "log(B/(B*Smin))/log(B/(B-1))+B, # of RTT from Wmax-B to Wmax");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) /* BIC TCP Parameters */
/* BIC TCP Parameters
 *
 * Per-connection state; stored in the congestion-control private area
 * of the socket (see inet_csk_ca() / the ICSK_CA_PRIV_SIZE check at
 * registration time).
 */
struct bictcp {
	u32	cnt;		/* increase cwnd by 1 after ACKs */
	u32	last_max_cwnd;	/* last maximum snd_cwnd (Wmax, binary-search target) */
	u32	last_cwnd;	/* the last snd_cwnd */
	u32	last_time;	/* time when updated last_cwnd */
	u32	epoch_start;	/* beginning of an epoch */
#define ACK_RATIO_SHIFT	4
	u32	delayed_ack;	/* estimate the ratio of Packets/ACKs << 4 */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) static inline void bictcp_reset(struct bictcp *ca)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) ca->cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) ca->last_max_cwnd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) ca->last_cwnd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) ca->last_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) ca->epoch_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) ca->delayed_ack = 2 << ACK_RATIO_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) static void bictcp_init(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) struct bictcp *ca = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) bictcp_reset(ca);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) if (initial_ssthresh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) * Compute congestion window to use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) */
/*
 * Compute congestion window to use.
 *
 * Sets ca->cnt: cwnd grows by one segment per ca->cnt ACKed segments
 * (consumed by tcp_cong_avoid_ai()), so a small cnt means fast growth.
 * Below last_max_cwnd the window binary-searches toward Wmax; above it
 * the window probes for a new maximum.
 */
static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
{
	/* Rate-limit recomputation: keep the previous cnt while cwnd is
	 * unchanged and less than HZ/32 jiffies have passed.
	 */
	if (ca->last_cwnd == cwnd &&
	    (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
		return;

	ca->last_cwnd = cwnd;
	ca->last_time = tcp_jiffies32;

	if (ca->epoch_start == 0) /* record the beginning of an epoch */
		ca->epoch_start = tcp_jiffies32;

	/* start off normal: small windows behave like Reno (+1 per RTT) */
	if (cwnd <= low_window) {
		ca->cnt = cwnd;
		return;
	}

	/* binary increase: below the last known maximum */
	if (cwnd < ca->last_max_cwnd) {
		__u32 dist = (ca->last_max_cwnd - cwnd)
			/ BICTCP_B;

		if (dist > max_increment)
			/* far from Wmax: linear increase, capped */
			ca->cnt = cwnd / max_increment;
		else if (dist <= 1U)
			/* very close to Wmax: slow, smoothed approach */
			ca->cnt = (cwnd * smooth_part) / BICTCP_B;
		else
			/* binary search increase toward Wmax */
			ca->cnt = cwnd / dist;
	} else {
		/* slow start AND linear increase: above the last maximum */
		if (cwnd < ca->last_max_cwnd + BICTCP_B)
			/* just past Wmax: probe cautiously */
			ca->cnt = (cwnd * smooth_part) / BICTCP_B;
		else if (cwnd < ca->last_max_cwnd + max_increment*(BICTCP_B-1))
			/* slow start: ramp up the probing rate */
			ca->cnt = (cwnd * (BICTCP_B-1))
				/ (cwnd - ca->last_max_cwnd);
		else
			/* linear increase, capped at max_increment per RTT */
			ca->cnt = cwnd / max_increment;
	}

	/* if in slow start or link utilization is very low */
	if (ca->last_max_cwnd == 0) {
		if (ca->cnt > 20) /* increase cwnd 5% per RTT */
			ca->cnt = 20;
	}

	/* Scale by the measured packets-per-ACK ratio (delayed ACKs). */
	ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack;
	if (ca->cnt == 0) /* cannot be zero */
		ca->cnt = 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
/* Main cwnd-growth hook, invoked per incoming ACK. */
static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	/* Don't grow cwnd if the sender isn't filling the current one. */
	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp)) {
		/* tcp_slow_start() returns the portion of 'acked' left
		 * over after crossing ssthresh; if everything was
		 * consumed by slow start we are done for this ACK.
		 */
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* Congestion avoidance: pick the BIC growth rate, then apply it. */
	bictcp_update(ca, tp->snd_cwnd);
	tcp_cong_avoid_ai(tp, ca->cnt, acked);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) * behave like Reno until low_window is reached,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) * then increase congestion window slowly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) static u32 bictcp_recalc_ssthresh(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) const struct tcp_sock *tp = tcp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) struct bictcp *ca = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) ca->epoch_start = 0; /* end of epoch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) /* Wmax and fast convergence */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) / (2 * BICTCP_BETA_SCALE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) ca->last_max_cwnd = tp->snd_cwnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) if (tp->snd_cwnd <= low_window)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) return max(tp->snd_cwnd >> 1U, 2U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) static void bictcp_state(struct sock *sk, u8 new_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) if (new_state == TCP_CA_Loss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) bictcp_reset(inet_csk_ca(sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) /* Track delayed acknowledgment ratio using sliding window
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) * ratio = (15*ratio + sample) / 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) static void bictcp_acked(struct sock *sk, const struct ack_sample *sample)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) const struct inet_connection_sock *icsk = inet_csk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) if (icsk->icsk_ca_state == TCP_CA_Open) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) struct bictcp *ca = inet_csk_ca(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) ca->delayed_ack += sample->pkts_acked -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) (ca->delayed_ack >> ACK_RATIO_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
/* Hook table registered with the TCP congestion-control framework. */
static struct tcp_congestion_ops bictcp __read_mostly = {
	.init		= bictcp_init,
	.ssthresh	= bictcp_recalc_ssthresh,
	.cong_avoid	= bictcp_cong_avoid,
	.set_state	= bictcp_state,
	.undo_cwnd	= tcp_reno_undo_cwnd,	/* reuse Reno's undo */
	.pkts_acked	= bictcp_acked,
	.owner		= THIS_MODULE,
	.name		= "bic",
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)
/* Module entry: register "bic" with the congestion-control framework. */
static int __init bictcp_register(void)
{
	/* Our per-socket state must fit in the private CA area that
	 * inet_connection_sock reserves (inet_csk_ca()).
	 */
	BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&bictcp);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
/* Module exit: remove "bic" from the congestion-control framework. */
static void __exit bictcp_unregister(void)
{
	tcp_unregister_congestion_control(&bictcp);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
module_init(bictcp_register);
module_exit(bictcp_unregister);

/* Module metadata. */
MODULE_AUTHOR("Stephen Hemminger");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("BIC TCP");