Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

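net/rxrpc/rtt.c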
// SPDX-License-Identifier: GPL-2.0
/* RTT/RTO calculation.
 *
 * Adapted from TCP for AF_RXRPC by David Howells (dhowells@redhat.com)
 *
 * https://tools.ietf.org/html/rfc6298
 * https://tools.ietf.org/html/rfc1122#section-4.2.3.1
 * http://ccr.sigcomm.org/archive/1995/jan95/ccr-9501-partridge87.pdf
 */

#include <linux/net.h>
#include "ar-internal.h"

#define RXRPC_RTO_MAX	((unsigned)(120 * HZ))
#define RXRPC_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
#define rxrpc_jiffies32 ((u32)jiffies)		/* As tcp_jiffies32 */

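/* The floor used when seeding the RTT variance.  The value is in
 * microseconds: it is compared against mdev_us when rttvar_us is first
 * seeded in rxrpc_rtt_estimator() below.
 */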
static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer)
{
	return 200;
}

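/* As in the TCP code this was adapted from, srtt_us is a fixed-point value
 * scaled by 8 (three fractional bits), so srtt_us >> 3 is the actual
 * smoothed RTT, while rttvar_us converges on roughly 4 * mdev; the sum
 * below is the classic RTO = SRTT + 4 * RTTVAR from RFC 6298.
 */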
static u32 __rxrpc_set_rto(const struct rxrpc_peer *peer)
{
	return usecs_to_jiffies((peer->srtt_us >> 3) + peer->rttvar_us);
}

static u32 rxrpc_bound_rto(u32 rto)
{
	return min(rto, RXRPC_RTO_MAX);
}

/*
 * Called to compute a smoothed rtt estimate. The data fed to this
 * routine either comes from timestamps, or from segments that were
 * known _not_ to have been retransmitted [see Karn/Partridge
 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 * piece by Van Jacobson.
 * NOTE: the next three routines used to be one big routine.
 * To save cycles in the RFC 1323 implementation it was better to break
 * it up into three procedures. -- erics
 */
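/* A worked example of the scaled arithmetic below: with srtt_us = 8000
 * (SRTT = 1000us) and a new sample m = 1200us, the error is
 * m - (srtt_us >> 3) = 200us; adding that back into srtt_us gives 8200,
 * i.e. SRTT = 1025us = 7/8 * 1000 + 1/8 * 1200.
 */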
static void rxrpc_rtt_estimator(struct rxrpc_peer *peer, long sample_rtt_us)
{
	long m = sample_rtt_us; /* RTT */
	u32 srtt = peer->srtt_us;

	/*	The following amusing code comes from Jacobson's
	 *	article in SIGCOMM '88.  Note that rtt and mdev
	 *	are scaled versions of rtt and mean deviation.
	 *	This is designed to be as fast as possible.
	 *	m stands for "measurement".
	 *
	 *	In a 1990 paper the rto value was changed to:
	 *	RTO = rtt + 4 * mdev
	 *
	 * Funny. This algorithm seems to be very broken.
	 * These formulae increase RTO when it should be decreased, increase it
	 * too slowly when it should be increased quickly, decrease it too
	 * quickly, etc.  I guess in BSD the RTO takes ONE value, so it
	 * absolutely does not matter how it is _calculated_.  It seems that
	 * was a trap VJ failed to avoid. 8)
	 */
	if (srtt != 0) {
		m -= (srtt >> 3);	/* m is now error in rtt est */
		srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
		if (m < 0) {
			m = -m;		/* m is now abs(error) */
			m -= (peer->mdev_us >> 2);   /* similar update on mdev */
			/* This is similar to one of the Eifel findings.
			 * Eifel blocks mdev updates when rtt decreases.
			 * This solution is a bit different: we use a finer
			 * gain for mdev in this case (alpha*beta).
			 * Like Eifel it also prevents growth of rto, but it
			 * also limits the overly fast rto decreases that
			 * happen in pure Eifel.
			 */
			if (m > 0)
				m >>= 3;
		} else {
			m -= (peer->mdev_us >> 2);   /* similar update on mdev */
		}

		peer->mdev_us += m;		/* mdev = 3/4 mdev + 1/4 new */
		if (peer->mdev_us > peer->mdev_max_us) {
			peer->mdev_max_us = peer->mdev_us;
			if (peer->mdev_max_us > peer->rttvar_us)
				peer->rttvar_us = peer->mdev_max_us;
		}
	} else {
		/* no previous measure. */
		srtt = m << 3;		/* take the measured time to be rtt */
		peer->mdev_us = m << 1;	/* make sure rto = 3*rtt */
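		/* srtt_us is now 8 * m and mdev_us is 2 * m, so the first
		 * RTO works out at m + 2 * m = 3 * m (subject to the 200us
		 * floor applied to rttvar_us below).
		 */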
		peer->rttvar_us = max(peer->mdev_us, rxrpc_rto_min_us(peer));
		peer->mdev_max_us = peer->rttvar_us;
	}

	peer->srtt_us = max(1U, srtt);
}

/*
 * Calculate rto without backoff.  This is the second half of Van Jacobson's
 * routine referred to above.
 */
static void rxrpc_set_rto(struct rxrpc_peer *peer)
{
	u32 rto;

	/* 1. If the rtt variance happens to be less than 50 msec, it is a
	 *    hallucination.  It cannot be less, due to the utterly erratic
	 *    ACK generation seen from at least Solaris and FreeBSD.  "Erratic
	 *    ACKs" have _nothing_ to do with delayed ACKs, because at cwnd > 2
	 *    the true delack timeout is invisible.  Actually, Linux 2.4 also
	 *    generates erratic ACKs in some circumstances.
	 */
	rto = __rxrpc_set_rto(peer);

	/* 2. Fixups made earlier cannot be right: if we do not estimate RTO
	 *    correctly without them, the whole algorithm is broken and should
	 *    be replaced with a correct one, which is exactly what we pretend
	 *    to do.
	 */

	/* NOTE: clamping at RXRPC_RTO_MIN is not required, current algo
	 * guarantees that rto is higher.
	 */
	peer->rto_j = rxrpc_bound_rto(rto);
}

static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us)
{
	if (rtt_us < 0)
		return;

	//rxrpc_update_rtt_min(peer, rtt_us);
	rxrpc_rtt_estimator(peer, rtt_us);
	rxrpc_set_rto(peer);

	/* RFC6298: only reset backoff on valid RTT measurement. */
	peer->backoff = 0;
}

/*
 * Add RTT information to cache.  This is called in softirq mode and has
 * exclusive access to the peer RTT data.
 */
void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
			int rtt_slot,
			rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
			ktime_t send_time, ktime_t resp_time)
{
	struct rxrpc_peer *peer = call->peer;
	s64 rtt_us;

	rtt_us = ktime_to_us(ktime_sub(resp_time, send_time));
	if (rtt_us < 0)
		return;

	spin_lock(&peer->rtt_input_lock);
	rxrpc_ack_update_rtt(peer, rtt_us);
	if (peer->rtt_count < 3)
		peer->rtt_count++;
	spin_unlock(&peer->rtt_input_lock);

	trace_rxrpc_rtt_rx(call, why, rtt_slot, send_serial, resp_serial,
			   peer->srtt_us >> 3, peer->rto_j);
}

/*
 * Get the retransmission timeout to set in jiffies, backing it off each time
 * we retransmit.
 */
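/* For example, with rto_j equal to one second's worth of jiffies, successive
 * retransmissions wait 1s, 2s, 4s, ... and the backoff counter stops
 * advancing once doubling the timeout would exceed RXRPC_RTO_MAX (120s).
 */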
unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans)
{
	u64 timo_j;
	u8 backoff = READ_ONCE(peer->backoff);

	timo_j = peer->rto_j;
	timo_j <<= backoff;
	if (retrans && timo_j * 2 <= RXRPC_RTO_MAX)
		WRITE_ONCE(peer->backoff, backoff + 1);

	if (timo_j < 1)
		timo_j = 1;

	return timo_j;
}

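/* Seed the RTT state for a new peer: the RTO starts at the RFC 6298 2.1
 * initial value of one second until the first real measurement arrives.
 */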
void rxrpc_peer_init_rtt(struct rxrpc_peer *peer)
{
	peer->rto_j	= RXRPC_TIMEOUT_INIT;
	peer->mdev_us	= jiffies_to_usecs(RXRPC_TIMEOUT_INIT);
	peer->backoff	= 0;
	//minmax_reset(&peer->rtt_min, rxrpc_jiffies32, ~0U);
}