Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

net/ipv4/tcp_hybla.c, as of commit 8f3ce5b39 (kx, 2023-10-28):

// SPDX-License-Identifier: GPL-2.0-only
/*
 * TCP HYBLA
 *
 * TCP-HYBLA Congestion control algorithm, based on:
 *   C.Caini, R.Firrincieli, "TCP-Hybla: A TCP Enhancement
 *   for Heterogeneous Networks",
 *   International Journal of Satellite Communications and Networking,
 *   September 2004
 *    Daniele Lacamera
 *    root at danielinux.net
 */

#include <linux/module.h>
#include <net/tcp.h>

/* Tcp Hybla structure. */
struct hybla {
	bool  hybla_en;
	u32   snd_cwnd_cents; /* Keeps increment values when it is <1, <<7 */
	u32   rho;	      /* Rho parameter, integer part  */
	u32   rho2;	      /* Rho * Rho, integer part */
	u32   rho_3ls;	      /* Rho parameter, <<3 */
	u32   rho2_7ls;	      /* Rho^2, <<7	*/
	u32   minrtt_us;      /* Minimum smoothed round trip time value seen */
};
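
/*
 * Note on units: rho is kept both as an integer (rho) and left-shifted
 * by 3 (rho_3ls), so the low three bits of rho_3ls are the fractional
 * part of rho in eighths.  Values shifted by 7 (rho2_7ls,
 * snd_cwnd_cents and the increments computed below) count in 1/128ths
 * of a segment; e.g. snd_cwnd_cents == 64 means half a segment of cwnd
 * growth is still pending.
 */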

/* Hybla reference round trip time (default = 1/40 sec = 25 ms), in ms */
static int rtt0 = 25;
module_param(rtt0, int, 0644);
MODULE_PARM_DESC(rtt0, "reference round trip time (ms)");

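/*
 * rtt0 is exported with mode 0644, so besides the module parameter at
 * load time ("modprobe tcp_hybla rtt0=<ms>") it can be changed at run
 * time through /sys/module/tcp_hybla/parameters/rtt0; a new value is
 * picked up the next time hybla_recalc_param() runs.
 */
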
/* This is called to refresh values for hybla parameters */
static inline void hybla_recalc_param(struct sock *sk)
{
	struct hybla *ca = inet_csk_ca(sk);

	ca->rho_3ls = max_t(u32,
			    tcp_sk(sk)->srtt_us / (rtt0 * USEC_PER_MSEC),
			    8U);
	ca->rho = ca->rho_3ls >> 3;
	ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
	ca->rho2 = ca->rho2_7ls >> 7;
}
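
/*
 * Worked example: tp->srtt_us stores the smoothed RTT left-shifted by
 * 3, so a 200 ms path gives srtt_us = 1600000.  With the default
 * rtt0 = 25 ms:
 *   rho_3ls  = 1600000 / 25000  = 64    (rho in eighths)
 *   rho      = 64 >> 3          = 8
 *   rho2_7ls = (64 * 64) << 1   = 8192  (rho^2 in 1/128ths)
 *   rho2     = 8192 >> 7        = 64
 * The max_t() against 8U keeps rho >= 1, so a path with RTT <= rtt0
 * ends up with plain Reno behaviour.
 */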

static void hybla_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct hybla *ca = inet_csk_ca(sk);

	ca->rho = 0;
	ca->rho2 = 0;
	ca->rho_3ls = 0;
	ca->rho2_7ls = 0;
	ca->snd_cwnd_cents = 0;
	ca->hybla_en = true;
	tp->snd_cwnd = 2;
	tp->snd_cwnd_clamp = 65535;

	/* 1st Rho measurement based on initial srtt */
	hybla_recalc_param(sk);

	/* set minimum rtt as this is the 1st ever seen */
	ca->minrtt_us = tp->srtt_us;
	tp->snd_cwnd = ca->rho;
}
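
/*
 * Because rho_3ls is clamped to at least 8, rho >= 1 here and the cwnd
 * set above is at least one segment; on a long-RTT path the connection
 * starts directly at rho segments, compensating for the slower
 * feedback loop.
 */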

static void hybla_state(struct sock *sk, u8 ca_state)
{
	struct hybla *ca = inet_csk_ca(sk);

	ca->hybla_en = (ca_state == TCP_CA_Open);
}
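
/*
 * hybla_en is therefore true only in TCP_CA_Open; while the connection
 * is in disorder, recovery or loss, hybla_cong_avoid() below falls
 * back to tcp_reno_cong_avoid() instead of the Hybla increments.
 */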

static inline u32 hybla_fraction(u32 odds)
{
	static const u32 fractions[] = {
		128, 139, 152, 165, 181, 197, 215, 234,
	};

	return (odds < ARRAY_SIZE(fractions)) ? fractions[odds] : 128;
}
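
/*
 * The table approximates 2^(odds/8) in <<7 fixed point, i.e.
 * fractions[i] ~= 128 * 2^(i/8); for instance fractions[4] = 181 is
 * 128 * sqrt(2) rounded.  odds is the fractional part of rho in
 * eighths (rho_3ls & 7), so hybla_fraction(rho_fractions) supplies the
 * 2^fract factor used by the slow start increment below.
 */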

/* TCP Hybla main routine.
 * This is the algorithm behavior:
 *     o Recalc Hybla parameters if min_rtt has changed
 *     o Give cwnd a new value based on the model proposed
 *     o remember increments <1
 */
static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct hybla *ca = inet_csk_ca(sk);
	u32 increment, odd, rho_fractions;
	int is_slowstart = 0;

	/* Recalculate rho only if this srtt is the lowest */
	if (tp->srtt_us < ca->minrtt_us) {
		hybla_recalc_param(sk);
		ca->minrtt_us = tp->srtt_us;
	}

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (!ca->hybla_en) {
		tcp_reno_cong_avoid(sk, ack, acked);
		return;
	}

	if (ca->rho == 0)
		hybla_recalc_param(sk);

	rho_fractions = ca->rho_3ls - (ca->rho << 3);

	if (tcp_in_slow_start(tp)) {
		/*
		 * slow start
		 *      INC = 2^RHO - 1
		 * This is done by splitting the rho parameter
		 * into 2 parts: an integer part and a fractional part.
		 * Increment<<7 is estimated by doing:
		 *	       [2^(int+fract)]<<7
		 * that is equal to:
		 *	       (2^int)	*  [(2^fract) <<7]
		 * 2^int is computed directly as 1<<int,
		 * while hybla_fraction() is used to
		 * calculate 2^fract in a <<7 value.
		 */
		is_slowstart = 1;
		increment = ((1 << min(ca->rho, 16U)) *
			hybla_fraction(rho_fractions)) - 128;
	} else {
		/*
		 * congestion avoidance
		 * INC = RHO^2 / W
		 * as long as increment is estimated as (rho^2<<7)/window
		 * it already is <<7 and we can easily count its fractions.
		 */
		increment = ca->rho2_7ls / tp->snd_cwnd;
		if (increment < 128)
			tp->snd_cwnd_cnt++;
	}

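	/*
	 * Worked example with rho = 8 (200 ms srtt, 25 ms rtt0):
	 * - slow start: increment = (1 << 8) * 128 - 128 = 32640, i.e.
	 *   32640 >> 7 = 255 = 2^rho - 1 extra segments per ACK;
	 * - congestion avoidance with cwnd = 128: increment =
	 *   8192 / 128 = 64, i.e. half a segment per ACK, accumulated
	 *   in snd_cwnd_cents below until it reaches 128 (one segment).
	 */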
	odd = increment % 128;
	tp->snd_cwnd += increment >> 7;
	ca->snd_cwnd_cents += odd;

	/* check when the accumulated fraction goes >= 128 and increase cwnd by 1. */
	while (ca->snd_cwnd_cents >= 128) {
		tp->snd_cwnd++;
		ca->snd_cwnd_cents -= 128;
		tp->snd_cwnd_cnt = 0;
	}
	/* check when cwnd has not been incremented for a while */
	if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tp->snd_cwnd) {
		tp->snd_cwnd++;
		tp->snd_cwnd_cnt = 0;
	}
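	/*
	 * The branch above matters once rho^2 << 7 drops below cwnd and
	 * the per-ACK increment truncates to zero: snd_cwnd_cnt, bumped
	 * once per call in the congestion avoidance branch, then gives
	 * Reno-style growth of one segment per window instead of
	 * stalling.
	 */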
	/* clamp down slowstart cwnd to ssthresh value. */
	if (is_slowstart)
		tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);

	tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
}

static struct tcp_congestion_ops tcp_hybla __read_mostly = {
	.init		= hybla_init,
	.ssthresh	= tcp_reno_ssthresh,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cong_avoid	= hybla_cong_avoid,
	.set_state	= hybla_state,

	.owner		= THIS_MODULE,
	.name		= "hybla"
};
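
/*
 * Only cwnd growth is overridden; ssthresh and undo_cwnd are reused
 * from Reno, so the reaction to loss is the standard halving of the
 * congestion window.
 */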

static int __init hybla_register(void)
{
	BUILD_BUG_ON(sizeof(struct hybla) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_hybla);
}

static void __exit hybla_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_hybla);
}

module_init(hybla_register);
module_exit(hybla_unregister);

MODULE_AUTHOR("Daniele Lacamera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Hybla");
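
Once registered, "hybla" appears in /proc/sys/net/ipv4/tcp_available_congestion_control and can be made the system-wide default with "sysctl net.ipv4.tcp_congestion_control=hybla". The userspace sketch below instead selects it for a single socket through the standard TCP_CONGESTION socket option; the 192.0.2.1:80 destination is only a placeholder, error handling is minimal, and picking a non-default algorithm per socket may additionally require CAP_NET_ADMIN.

/* Minimal sketch: pick the "hybla" congestion control for one socket. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in dst = { .sin_family = AF_INET };
	const char cc[] = "hybla";
	char cur[16] = { 0 };
	socklen_t len = sizeof(cur);
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Ask the kernel to attach tcp_hybla to this socket; this fails
	 * (typically with ENOENT) if the module is not loaded or built in.
	 */
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, cc, strlen(cc)) < 0) {
		perror("setsockopt(TCP_CONGESTION)");
		close(fd);
		return 1;
	}

	/* Read the option back to confirm which algorithm is in use. */
	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, cur, &len) == 0)
		printf("congestion control: %s\n", cur);

	dst.sin_port = htons(80);
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");

	close(fd);
	return 0;
}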