^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (c) 2005, 2006 Andrea Bittau <a.bittau@cs.ucl.ac.uk>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Changes to meet Linux coding standards, and DCCP infrastructure fixes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (c) 2006 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * This implementation should follow RFC 4341
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include "../feat.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include "ccid2.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #ifdef CONFIG_IP_DCCP_CCID2_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) static bool ccid2_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #define ccid2_pr_debug(format, a...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
/*
 * ccid2_hc_tx_alloc_seq  -  grow the circular packet-history list
 * @hc: TX socket state to allocate history entries for
 *
 * Allocates one further buffer of CCID2_SEQBUF_LEN entries, wires it up as
 * a ring, and splices it into the existing circular list (or makes it the
 * initial list on the first call).  The raw buffer pointer is remembered in
 * hc->tx_seqbuf[] so it can be freed later.
 *
 * Returns 0 on success, -ENOMEM when the pointer table is full or the
 * allocation fails.
 */
static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc)
{
	struct ccid2_seq *seqp;
	int i;

	/* check if we have space to preserve the pointer to the buffer */
	if (hc->tx_seqbufc >= (sizeof(hc->tx_seqbuf) /
			       sizeof(struct ccid2_seq *)))
		return -ENOMEM;

	/* allocate buffer and initialize linked list */
	seqp = kmalloc_array(CCID2_SEQBUF_LEN, sizeof(struct ccid2_seq),
			     gfp_any());
	if (seqp == NULL)
		return -ENOMEM;

	/* doubly-link consecutive entries ... */
	for (i = 0; i < (CCID2_SEQBUF_LEN - 1); i++) {
		seqp[i].ccid2s_next = &seqp[i + 1];
		seqp[i + 1].ccid2s_prev = &seqp[i];
	}
	/* ... and close the ring: last entry wraps around to the first */
	seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = seqp;
	seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];

	/* This is the first allocation.  Initiate the head and tail. */
	if (hc->tx_seqbufc == 0)
		hc->tx_seqh = hc->tx_seqt = seqp;
	else {
		/* link the existing list with the one we just created;
		 * note this overwrites the self-referential prev/next links
		 * set up above, merging both rings into one larger ring */
		hc->tx_seqh->ccid2s_next = seqp;
		seqp->ccid2s_prev = hc->tx_seqh;

		hc->tx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
		seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hc->tx_seqt;
	}

	/* store the original pointer to the buffer so we can free it */
	hc->tx_seqbuf[hc->tx_seqbufc] = seqp;
	hc->tx_seqbufc++;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) return CCID_PACKET_WILL_DEQUEUE_LATER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) return CCID_PACKET_SEND_AT_ONCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) * RFC 4341, 6.1.2. We ignore the statement that Ack Ratio 2 is always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) * acceptable since this causes starvation/deadlock whenever cwnd < 2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * The same problem arises when Ack Ratio is 0 (ie. Ack Ratio disabled).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) if (val == 0 || val > max_ratio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) val = max_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) min_t(u32, val, DCCPF_ACK_RATIO_MAX));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) static void ccid2_check_l_ack_ratio(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) * After a loss, idle period, application limited period, or RTO we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) * need to check that the ack ratio is still less than the congestion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) * window. Otherwise, we will send an entire congestion window of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) * packets and got no response because we haven't sent ack ratio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) * packets yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) * If the ack ratio does need to be reduced, we reduce it to half of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) * the congestion window (or 1 if that's zero) instead of to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) * congestion window. This prevents problems if one ack is lost.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) ccid2_change_l_ack_ratio(sk, hc->tx_cwnd/2 ? : 1U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) clamp_val(val, DCCPF_SEQ_WMIN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) DCCPF_SEQ_WMAX));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
/*
 * dccp_tasklet_schedule  -  schedule the DCCP xmit tasklet for @sk
 *
 * Open-coded variant of tasklet_schedule() that additionally takes a
 * reference on the socket when it wins the race to set
 * TASKLET_STATE_SCHED, so @sk cannot be freed while the tasklet is
 * pending.  NOTE(review): the matching sock_put() is presumably done by
 * the tasklet handler itself - not visible in this file chunk.
 */
static void dccp_tasklet_schedule(struct sock *sk)
{
	struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;

	/* test_and_set_bit() guarantees at most one hold per pending run */
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		sock_hold(sk);
		__tasklet_schedule(t);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
/*
 * ccid2_hc_tx_rto_expire  -  retransmission-timeout handler
 *
 * Runs in BH context.  If user context currently owns the socket lock,
 * simply retry shortly instead of racing with it.  Otherwise: back off
 * the RTO exponentially (capped at DCCP_RTO_MAX), collapse cwnd to 1
 * (keeping ssthresh >= 2), discard the entire packet history and reset
 * the Ack Ratio state, then re-arm the timer.
 */
static void ccid2_hc_tx_rto_expire(struct timer_list *t)
{
	struct ccid2_hc_tx_sock *hc = from_timer(hc, t, tx_rtotimer);
	struct sock *sk = hc->sk;
	const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* socket is busy in user context - try again in HZ/5 */
		sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + HZ / 5);
		goto out;
	}

	ccid2_pr_debug("RTO_EXPIRE\n");

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	/* back-off timer */
	hc->tx_rto <<= 1;
	if (hc->tx_rto > DCCP_RTO_MAX)
		hc->tx_rto = DCCP_RTO_MAX;

	/* adjust pipe, cwnd etc */
	hc->tx_ssthresh = hc->tx_cwnd / 2;
	if (hc->tx_ssthresh < 2)
		hc->tx_ssthresh = 2;
	hc->tx_cwnd = 1;
	hc->tx_pipe = 0;

	/* clear state about stuff we sent */
	hc->tx_seqt = hc->tx_seqh;
	hc->tx_packets_acked = 0;

	/* clear ack ratio state. */
	hc->tx_rpseq = 0;
	hc->tx_rpdupack = -1;
	ccid2_change_l_ack_ratio(sk, 1);

	/* if we were blocked before, we may now send cwnd=1 packet */
	if (sender_was_blocked)
		dccp_tasklet_schedule(sk);
	/* restart backed-off timer */
	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
/*
 * Congestion window validation (RFC 2861).
 * Enabled by default; tunable at runtime through the module parameter
 * (mode 0644, i.e. root may change it via sysfs).
 */
static bool ccid2_do_cwv = true;
module_param(ccid2_do_cwv, bool, 0644);
MODULE_PARM_DESC(ccid2_do_cwv, "Perform RFC2861 Congestion Window Validation");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) * ccid2_update_used_window - Track how much of cwnd is actually used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) * This is done in addition to CWV. The sender needs to have an idea of how many
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) * packets may be in flight, to set the local Sequence Window value accordingly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) * (RFC 4340, 7.5.2). The CWV mechanism is exploited to keep track of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) * maximum-used window. We use an EWMA low-pass filter to filter out noise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) static void ccid2_update_used_window(struct ccid2_hc_tx_sock *hc, u32 new_wnd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) hc->tx_expected_wnd = (3 * hc->tx_expected_wnd + new_wnd) / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) /* This borrows the code of tcp_cwnd_application_limited() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) static void ccid2_cwnd_application_limited(struct sock *sk, const u32 now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) /* don't reduce cwnd below the initial window (IW) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) u32 init_win = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) win_used = max(hc->tx_cwnd_used, init_win);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) if (win_used < hc->tx_cwnd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) hc->tx_ssthresh = max(hc->tx_ssthresh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) (hc->tx_cwnd >> 1) + (hc->tx_cwnd >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) hc->tx_cwnd = (hc->tx_cwnd + win_used) >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) hc->tx_cwnd_used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) hc->tx_cwnd_stamp = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) ccid2_check_l_ack_ratio(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) /* This borrows the code of tcp_cwnd_restart() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) u32 cwnd = hc->tx_cwnd, restart_cwnd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) s32 delta = now - hc->tx_lsndtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) /* don't reduce cwnd below the initial window (IW) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) restart_cwnd = min(cwnd, iwnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) while ((delta -= hc->tx_rto) >= 0 && cwnd > restart_cwnd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) cwnd >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) hc->tx_cwnd = max(cwnd, restart_cwnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) hc->tx_cwnd_stamp = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) hc->tx_cwnd_used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) ccid2_check_l_ack_ratio(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
/*
 * ccid2_hc_tx_packet_sent  -  per-packet accounting after transmission
 * @sk:  socket the packet was sent on
 * @len: packet length (unused here)
 *
 * Applies RFC 2861 congestion window validation, records the packet in
 * the circular history ring (growing the ring when it is about to become
 * full), and (re)arms the RTO timer to cover the outstanding data.
 */
static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	const u32 now = ccid2_jiffies32;
	struct ccid2_seq *next;

	/* slow-start after idle periods (RFC 2581, RFC 2861) */
	if (ccid2_do_cwv && !hc->tx_pipe &&
	    (s32)(now - hc->tx_lsndtime) >= hc->tx_rto)
		ccid2_cwnd_restart(sk, now);

	hc->tx_lsndtime = now;
	hc->tx_pipe += 1;	/* one more packet is now in flight */

	/* see whether cwnd was fully used (RFC 2861), update expected window */
	if (ccid2_cwnd_network_limited(hc)) {
		ccid2_update_used_window(hc, hc->tx_cwnd);
		hc->tx_cwnd_used = 0;
		hc->tx_cwnd_stamp = now;
	} else {
		if (hc->tx_pipe > hc->tx_cwnd_used)
			hc->tx_cwnd_used = hc->tx_pipe;

		ccid2_update_used_window(hc, hc->tx_cwnd_used);

		if (ccid2_do_cwv && (s32)(now - hc->tx_cwnd_stamp) >= hc->tx_rto)
			ccid2_cwnd_application_limited(sk, now);
	}

	/* record this packet in the history: sequence number, not yet
	 * acked, send timestamp */
	hc->tx_seqh->ccid2s_seq = dp->dccps_gss;
	hc->tx_seqh->ccid2s_acked = 0;
	hc->tx_seqh->ccid2s_sent = now;

	next = hc->tx_seqh->ccid2s_next;
	/* check if we need to alloc more space */
	if (next == hc->tx_seqt) {
		if (ccid2_hc_tx_alloc_seq(hc)) {
			DCCP_CRIT("packet history - out of memory!");
			/* FIXME: find a more graceful way to bail out */
			return;
		}
		next = hc->tx_seqh->ccid2s_next;
		/* the fresh buffer must have made room behind the head */
		BUG_ON(next == hc->tx_seqt);
	}
	hc->tx_seqh = next;

	ccid2_pr_debug("cwnd=%d pipe=%d\n", hc->tx_cwnd, hc->tx_pipe);

	/*
	 * FIXME: The code below is broken and the variables have been removed
	 * from the socket struct. The `ackloss' variable was always set to 0,
	 * and with arsent there are several problems:
	 *  (i) it doesn't just count the number of Acks, but all sent packets;
	 *  (ii) it is expressed in # of packets, not # of windows, so the
	 *  comparison below uses the wrong formula: Appendix A of RFC 4341
	 *  comes up with the number K = cwnd / (R^2 - R) of consecutive windows
	 *  of data with no lost or marked Ack packets. If arsent were the # of
	 *  consecutive Acks received without loss, then Ack Ratio needs to be
	 *  decreased by 1 when
	 *	arsent >=  K * cwnd / R  =  cwnd^2 / (R^3 - R^2)
	 *  where cwnd / R is the number of Acks received per window of data
	 *  (cf. RFC 4341, App. A). The problems are that
	 *  - arsent counts other packets as well;
	 *  - the comparison uses a formula different from RFC 4341;
	 *  - computing a cubic/quadratic equation each time is too complicated.
	 *  Hence a different algorithm is needed.
	 */
#if 0
	/* Ack Ratio.  Need to maintain a concept of how many windows we sent */
	hc->tx_arsent++;
	/* We had an ack loss in this window... */
	if (hc->tx_ackloss) {
		if (hc->tx_arsent >= hc->tx_cwnd) {
			hc->tx_arsent = 0;
			hc->tx_ackloss = 0;
		}
	} else {
		/* No acks lost up to now... */
		/* decrease ack ratio if enough packets were sent */
		if (dp->dccps_l_ack_ratio > 1) {
			/* XXX don't calculate denominator each time */
			int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio -
				    dp->dccps_l_ack_ratio;

			denom = hc->tx_cwnd * hc->tx_cwnd / denom;

			if (hc->tx_arsent >= denom) {
				ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1);
				hc->tx_arsent = 0;
			}
		} else {
			/* we can't increase ack ratio further [1] */
			hc->tx_arsent = 0; /* or maybe set it to cwnd*/
		}
	}
#endif

	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);

#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
	/* dump the current contents of the packet-history ring */
	do {
		struct ccid2_seq *seqp = hc->tx_seqt;

		while (seqp != hc->tx_seqh) {
			ccid2_pr_debug("out seq=%llu acked=%d time=%u\n",
				       (unsigned long long)seqp->ccid2s_seq,
				       seqp->ccid2s_acked, seqp->ccid2s_sent);
			seqp = seqp->ccid2s_next;
		}
	} while (0);
	ccid2_pr_debug("=========\n");
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
/**
 * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm
 * @sk:   socket owning the CCID-2 TX state being updated
 * @mrtt: measured RTT sample (a value of 0 is treated as 1)
 *
 * This code is almost identical with TCP's tcp_rtt_estimator(), since
 * - it has a higher sampling frequency (recommended by RFC 1323),
 * - the RTO does not collapse into RTT due to RTTVAR going towards zero,
 * - it is simple (cf. more complex proposals such as Eifel timer or research
 *   which suggests that the gain should be set according to window size),
 * - in tests it was found to work well with CCID2 [gerrit].
 */
static void ccid2_rtt_estimator(struct sock *sk, const long mrtt)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	long m = mrtt ? : 1;	/* avoid 0: tx_srtt == 0 means "no sample yet" */

	if (hc->tx_srtt == 0) {
		/* First measurement m */
		hc->tx_srtt = m << 3;	/* srtt is kept scaled by 8 */
		hc->tx_mdev = m << 1;	/* mdev is kept scaled by 4 */

		hc->tx_mdev_max = max(hc->tx_mdev, tcp_rto_min(sk));
		hc->tx_rttvar   = hc->tx_mdev_max;

		hc->tx_rtt_seq  = dccp_sk(sk)->dccps_gss;
	} else {
		/* Update scaled SRTT as SRTT += 1/8 * (m - SRTT) */
		m -= (hc->tx_srtt >> 3);
		hc->tx_srtt += m;

		/* Similarly, update scaled mdev with regard to |m| */
		if (m < 0) {
			m = -m;
			m -= (hc->tx_mdev >> 2);
			/*
			 * This neutralises RTO increase when RTT < SRTT - mdev
			 * (see P. Sarolahti, A. Kuznetsov,"Congestion Control
			 * in Linux TCP", USENIX 2002, pp. 49-62).
			 */
			if (m > 0)
				m >>= 3;
		} else {
			m -= (hc->tx_mdev >> 2);
		}
		hc->tx_mdev += m;

		if (hc->tx_mdev > hc->tx_mdev_max) {
			hc->tx_mdev_max = hc->tx_mdev;
			if (hc->tx_mdev_max > hc->tx_rttvar)
				hc->tx_rttvar = hc->tx_mdev_max;
		}

		/*
		 * Decay RTTVAR at most once per flight, exploiting that
		 *  1) pipe <= cwnd <= Sequence_Window = W  (RFC 4340, 7.5.2)
		 *  2) AWL = GSS-W+1 <= GAR <= GSS          (RFC 4340, 7.5.1)
		 * GAR is a useful bound for FlightSize = pipe.
		 * AWL is probably too low here, as it over-estimates pipe.
		 */
		if (after48(dccp_sk(sk)->dccps_gar, hc->tx_rtt_seq)) {
			if (hc->tx_mdev_max < hc->tx_rttvar)
				hc->tx_rttvar -= (hc->tx_rttvar -
						  hc->tx_mdev_max) >> 2;
			hc->tx_rtt_seq  = dccp_sk(sk)->dccps_gss;
			hc->tx_mdev_max = tcp_rto_min(sk);
		}
	}

	/*
	 * Set RTO from SRTT and RTTVAR
	 * As in TCP, 4 * RTTVAR >= TCP_RTO_MIN, giving a minimum RTO of 200 ms.
	 * This agrees with RFC 4341, 5:
	 *	"Because DCCP does not retransmit data, DCCP does not require
	 *	 TCP's recommended minimum timeout of one second".
	 */
	hc->tx_rto = (hc->tx_srtt >> 3) + hc->tx_rttvar;

	if (hc->tx_rto > DCCP_RTO_MAX)
		hc->tx_rto = DCCP_RTO_MAX;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) unsigned int *maxincr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) struct dccp_sock *dp = dccp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) int r_seq_used = hc->tx_cwnd / dp->dccps_l_ack_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) if (hc->tx_cwnd < dp->dccps_l_seq_win &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) r_seq_used < dp->dccps_r_seq_win) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) if (hc->tx_cwnd < hc->tx_ssthresh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) if (*maxincr > 0 && ++hc->tx_packets_acked >= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) hc->tx_cwnd += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) *maxincr -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) hc->tx_packets_acked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) } else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) hc->tx_cwnd += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) hc->tx_packets_acked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) * Adjust the local sequence window and the ack ratio to allow about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) * 5 times the number of packets in the network (RFC 4340 7.5.2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) if (r_seq_used * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_r_seq_win)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) else if (r_seq_used * CCID2_WIN_CHANGE_FACTOR < dp->dccps_r_seq_win/2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio / 2 ? : 1U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_l_seq_win)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) else if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR < dp->dccps_l_seq_win/2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) * FIXME: RTT is sampled several times per acknowledgment (for each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) * entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) * This causes the RTT to be over-estimated, since the older entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) * in the Ack Vector have earlier sending times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) * The cleanest solution is to not use the ccid2s_sent field at all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) * and instead use DCCP timestamps: requires changes in other places.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) ccid2_rtt_estimator(sk, ccid2_jiffies32 - seqp->ccid2s_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) if ((s32)(seqp->ccid2s_sent - hc->tx_last_cong) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) hc->tx_last_cong = ccid2_jiffies32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) hc->tx_ssthresh = max(hc->tx_cwnd, 2U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) ccid2_check_l_ack_ratio(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) u8 option, u8 *optval, u8 optlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) switch (option) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) case DCCPO_ACK_VECTOR_0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) case DCCPO_ACK_VECTOR_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) return dccp_ackvec_parsed_add(&hc->tx_av_chunks, optval, optlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) option - DCCPO_ACK_VECTOR_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) struct dccp_sock *dp = dccp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) struct dccp_ackvec_parsed *avp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) u64 ackno, seqno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) struct ccid2_seq *seqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) int done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) unsigned int maxincr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) /* check reverse path congestion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) seqno = DCCP_SKB_CB(skb)->dccpd_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) /* XXX this whole "algorithm" is broken. Need to fix it to keep track
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) * of the seqnos of the dupacks so that rpseq and rpdupack are correct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) * -sorbo.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) /* need to bootstrap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) if (hc->tx_rpdupack == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) hc->tx_rpdupack = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) hc->tx_rpseq = seqno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) /* check if packet is consecutive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) if (dccp_delta_seqno(hc->tx_rpseq, seqno) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) hc->tx_rpseq = seqno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) /* it's a later packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) else if (after48(seqno, hc->tx_rpseq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) hc->tx_rpdupack++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) /* check if we got enough dupacks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) if (hc->tx_rpdupack >= NUMDUPACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) hc->tx_rpdupack = -1; /* XXX lame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) hc->tx_rpseq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) #ifdef __CCID2_COPES_GRACEFULLY_WITH_ACK_CONGESTION_CONTROL__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) * FIXME: Ack Congestion Control is broken; in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) * the current state instabilities occurred with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) * Ack Ratios greater than 1; causing hang-ups
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) * and long RTO timeouts. This needs to be fixed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) * before opening up dynamic changes. -- gerrit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) /* check forward path congestion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) if (dccp_packet_without_ack(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) /* still didn't send out new data packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) if (hc->tx_seqh == hc->tx_seqt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) if (after48(ackno, hc->tx_high_ack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) hc->tx_high_ack = ackno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) seqp = hc->tx_seqt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) while (before48(seqp->ccid2s_seq, ackno)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) seqp = seqp->ccid2s_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) if (seqp == hc->tx_seqh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) seqp = hc->tx_seqh->ccid2s_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) * In slow-start, cwnd can increase up to a maximum of Ack Ratio/2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) * packets per acknowledgement. Rounding up avoids that cwnd is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) * advanced when Ack Ratio is 1 and gives a slight edge otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) if (hc->tx_cwnd < hc->tx_ssthresh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) /* go through all ack vectors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) list_for_each_entry(avp, &hc->tx_av_chunks, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) /* go through this ack vector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) for (; avp->len--; avp->vec++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) u64 ackno_end_rl = SUB48(ackno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) dccp_ackvec_runlen(avp->vec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) ccid2_pr_debug("ackvec %llu |%u,%u|\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) (unsigned long long)ackno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) dccp_ackvec_state(avp->vec) >> 6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) dccp_ackvec_runlen(avp->vec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) /* if the seqno we are analyzing is larger than the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) * current ackno, then move towards the tail of our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) * seqnos.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) while (after48(seqp->ccid2s_seq, ackno)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) if (seqp == hc->tx_seqt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) seqp = seqp->ccid2s_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) if (done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) /* check all seqnos in the range of the vector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) * run length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) while (between48(seqp->ccid2s_seq,ackno_end_rl,ackno)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) const u8 state = dccp_ackvec_state(avp->vec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) /* new packet received or marked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) if (state != DCCPAV_NOT_RECEIVED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) !seqp->ccid2s_acked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) if (state == DCCPAV_ECN_MARKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) ccid2_congestion_event(sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) seqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) ccid2_new_ack(sk, seqp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) &maxincr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) seqp->ccid2s_acked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) ccid2_pr_debug("Got ack for %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) (unsigned long long)seqp->ccid2s_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) hc->tx_pipe--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) if (seqp == hc->tx_seqt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) seqp = seqp->ccid2s_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) if (done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) ackno = SUB48(ackno_end_rl, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) if (done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) /* The state about what is acked should be correct now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) * Check for NUMDUPACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) seqp = hc->tx_seqt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) while (before48(seqp->ccid2s_seq, hc->tx_high_ack)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) seqp = seqp->ccid2s_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) if (seqp == hc->tx_seqh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) seqp = hc->tx_seqh->ccid2s_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) if (seqp->ccid2s_acked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) done++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) if (done == NUMDUPACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) if (seqp == hc->tx_seqt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) seqp = seqp->ccid2s_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) /* If there are at least 3 acknowledgements, anything unacknowledged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) * below the last sequence number is considered lost
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) if (done == NUMDUPACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) struct ccid2_seq *last_acked = seqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) /* check for lost packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) if (!seqp->ccid2s_acked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) ccid2_pr_debug("Packet lost: %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) (unsigned long long)seqp->ccid2s_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) /* XXX need to traverse from tail -> head in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) * order to detect multiple congestion events in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) * one ack vector.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) ccid2_congestion_event(sk, seqp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) hc->tx_pipe--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) if (seqp == hc->tx_seqt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) seqp = seqp->ccid2s_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) hc->tx_seqt = last_acked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) /* trim acked packets in tail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) while (hc->tx_seqt != hc->tx_seqh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) if (!hc->tx_seqt->ccid2s_acked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) hc->tx_seqt = hc->tx_seqt->ccid2s_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) /* restart RTO timer if not all outstanding data has been acked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) if (hc->tx_pipe == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) sk_stop_timer(sk, &hc->tx_rtotimer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) /* check if incoming Acks allow pending packets to be sent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) dccp_tasklet_schedule(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) struct dccp_sock *dp = dccp_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) u32 max_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) /* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) hc->tx_ssthresh = ~0U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) /* Use larger initial windows (RFC 4341, section 5). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) hc->tx_cwnd = rfc3390_bytes_to_packets(dp->dccps_mss_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) hc->tx_expected_wnd = hc->tx_cwnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) /* Make sure that Ack Ratio is enabled and within bounds. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) dp->dccps_l_ack_ratio = max_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) /* XXX init ~ to window size... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) if (ccid2_hc_tx_alloc_seq(hc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) hc->tx_rto = DCCP_TIMEOUT_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) hc->tx_rpdupack = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_jiffies32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) hc->tx_cwnd_used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) hc->sk = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) timer_setup(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) INIT_LIST_HEAD(&hc->tx_av_chunks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) static void ccid2_hc_tx_exit(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) sk_stop_timer(sk, &hc->tx_rtotimer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) for (i = 0; i < hc->tx_seqbufc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) kfree(hc->tx_seqbuf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) hc->tx_seqbufc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) struct ccid2_hc_rx_sock *hc = ccid2_hc_rx_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) if (!dccp_data_packet(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) if (++hc->rx_num_data_pkts >= dccp_sk(sk)->dccps_r_ack_ratio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) dccp_send_ack(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) hc->rx_num_data_pkts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct ccid_operations ccid2_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) .ccid_id = DCCPC_CCID2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) .ccid_name = "TCP-like",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) .ccid_hc_tx_obj_size = sizeof(struct ccid2_hc_tx_sock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) .ccid_hc_tx_init = ccid2_hc_tx_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) .ccid_hc_tx_exit = ccid2_hc_tx_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) .ccid_hc_tx_send_packet = ccid2_hc_tx_send_packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) .ccid_hc_tx_packet_sent = ccid2_hc_tx_packet_sent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) .ccid_hc_tx_parse_options = ccid2_hc_tx_parse_options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) .ccid_hc_tx_packet_recv = ccid2_hc_tx_packet_recv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) .ccid_hc_rx_obj_size = sizeof(struct ccid2_hc_rx_sock),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) .ccid_hc_rx_packet_recv = ccid2_hc_rx_packet_recv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) #ifdef CONFIG_IP_DCCP_CCID2_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) module_param(ccid2_debug, bool, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) MODULE_PARM_DESC(ccid2_debug, "Enable CCID-2 debug messages");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) #endif