^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Written by David Howells (dhowells@redhat.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/circ_buf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/net.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/udp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <net/sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <net/af_rxrpc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include "ar-internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * Propose a PING ACK be sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) static void rxrpc_propose_ping(struct rxrpc_call *call,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) bool immediate, bool background)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) if (immediate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) if (background &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) rxrpc_queue_call(call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) unsigned long now = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) unsigned long ping_at = now + rxrpc_idle_ack_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) if (time_before(ping_at, call->ping_at)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) WRITE_ONCE(call->ping_at, ping_at);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) rxrpc_reduce_call_timer(call, ping_at, now,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) rxrpc_timer_set_for_ping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * propose an ACK be sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) */
/*
 * propose an ACK be sent
 *
 * Called with call->lock held (see rxrpc_propose_ACK()).  Decides whether
 * the proposed ACK should replace, update or be subsumed into whatever ACK
 * is already pending on the call, then either raises an immediate
 * RXRPC_CALL_EV_ACK event or pulls the delayed-ACK timer forward.
 */
static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
				u32 serial, bool immediate, bool background,
				enum rxrpc_propose_ack_trace why)
{
	enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
	unsigned long expiry = rxrpc_soft_ack_delay;
	s8 prior = rxrpc_ack_priority[ack_reason];

	/* Pings are handled specially because we don't want to accidentally
	 * lose a ping response by subsuming it into a ping.
	 */
	if (ack_reason == RXRPC_ACK_PING) {
		rxrpc_propose_ping(call, immediate, background);
		goto trace;
	}

	/* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
	 * numbers, but we don't alter the timeout.
	 */
	_debug("prior %u %u vs %u %u",
	       ack_reason, prior,
	       call->ackr_reason, rxrpc_ack_priority[call->ackr_reason]);
	if (ack_reason == call->ackr_reason) {
		/* Same reason as the pending ACK: refresh the serial number
		 * if this reason is in the updateable set.
		 */
		if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
			outcome = rxrpc_propose_ack_update;
			call->ackr_serial = serial;
		}
		if (!immediate)
			goto trace;
	} else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
		/* Higher-priority reason displaces the pending one. */
		call->ackr_reason = ack_reason;
		call->ackr_serial = serial;
	} else {
		/* Lower priority: the pending ACK absorbs this proposal. */
		outcome = rxrpc_propose_ack_subsume;
	}

	/* Pick the delayed-ACK expiry for the reason; reasons without a
	 * tunable delay are transmitted immediately (default case).
	 */
	switch (ack_reason) {
	case RXRPC_ACK_REQUESTED:
		if (rxrpc_requested_ack_delay < expiry)
			expiry = rxrpc_requested_ack_delay;
		/* NOTE(review): serial 1 deliberately loses its immediacy
		 * here - presumably to avoid an instant ACK of the first
		 * packet; confirm against protocol requirements.
		 */
		if (serial == 1)
			immediate = false;
		break;

	case RXRPC_ACK_DELAY:
		if (rxrpc_soft_ack_delay < expiry)
			expiry = rxrpc_soft_ack_delay;
		break;

	case RXRPC_ACK_IDLE:
		if (rxrpc_idle_ack_delay < expiry)
			expiry = rxrpc_idle_ack_delay;
		break;

	default:
		immediate = true;
		break;
	}

	if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
		/* An ACK transmission is already scheduled; nothing to do. */
		_debug("already scheduled");
	} else if (immediate || expiry == 0) {
		/* Raise the event now; only queue the worker for the
		 * background path.
		 */
		_debug("immediate ACK %lx", call->events);
		if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events) &&
		    background)
			rxrpc_queue_call(call);
	} else {
		unsigned long now = jiffies, ack_at;

		/* Delay by the smoothed RTT estimate if one is available
		 * (srtt_us appears to be stored scaled by 8, hence >>3 -
		 * confirm against the peer RTT code), else by the
		 * reason-specific expiry.
		 */
		if (call->peer->srtt_us != 0)
			ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3);
		else
			ack_at = expiry;

		ack_at += READ_ONCE(call->tx_backoff);
		ack_at += now;
		/* Only ever bring the ACK deadline forward. */
		if (time_before(ack_at, call->ack_at)) {
			WRITE_ONCE(call->ack_at, ack_at);
			rxrpc_reduce_call_timer(call, ack_at, now,
						rxrpc_timer_set_for_ack);
		}
	}

trace:
	trace_rxrpc_propose_ack(call, why, ack_reason, serial, immediate,
				background, outcome);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) * propose an ACK be sent, locking the call structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) u32 serial, bool immediate, bool background,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) enum rxrpc_propose_ack_trace why)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) spin_lock_bh(&call->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) __rxrpc_propose_ACK(call, ack_reason, serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) immediate, background, why);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) spin_unlock_bh(&call->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) * Handle congestion being detected by the retransmit timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) */
/*
 * Handle congestion being detected by the retransmit timeout.
 *
 * Just records the fact by flagging the call; the flag is consumed by the
 * congestion-management code elsewhere (not visible in this file view).
 */
static void rxrpc_congestion_timeout(struct rxrpc_call *call)
{
	set_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) * Perform retransmission of NAK'd and unack'd packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) unsigned long resend_at;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) rxrpc_seq_t cursor, seq, top;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) ktime_t now, max_age, oldest, ack_ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) int ix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) u8 annotation, anno_type, retrans = 0, unacked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) _enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) now = ktime_get_real();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) max_age = ktime_sub(now, jiffies_to_usecs(call->peer->rto_j));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) spin_lock_bh(&call->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) cursor = call->tx_hard_ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) top = call->tx_top;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) ASSERT(before_eq(cursor, top));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) if (cursor == top)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) /* Scan the packet list without dropping the lock and decide which of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) * the packets in the Tx buffer we're going to resend and what the new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) * resend timeout will be.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) trace_rxrpc_resend(call, (cursor + 1) & RXRPC_RXTX_BUFF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) oldest = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) for (seq = cursor + 1; before_eq(seq, top); seq++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) ix = seq & RXRPC_RXTX_BUFF_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) annotation = call->rxtx_annotations[ix];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) anno_type = annotation & RXRPC_TX_ANNO_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) annotation &= ~RXRPC_TX_ANNO_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) if (anno_type == RXRPC_TX_ANNO_ACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) skb = call->rxtx_buffer[ix];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) rxrpc_see_skb(skb, rxrpc_skb_seen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) if (anno_type == RXRPC_TX_ANNO_UNACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) if (ktime_after(skb->tstamp, max_age)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) if (ktime_before(skb->tstamp, oldest))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) oldest = skb->tstamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) if (!(annotation & RXRPC_TX_ANNO_RESENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) unacked++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) /* Okay, we need to retransmit a packet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS | annotation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) retrans++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) trace_rxrpc_retransmit(call, seq, annotation | anno_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) resend_at += jiffies + rxrpc_get_rto_backoff(call->peer, retrans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) WRITE_ONCE(call->resend_at, resend_at);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) if (unacked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) rxrpc_congestion_timeout(call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) /* If there was nothing that needed retransmission then it's likely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) * that an ACK got lost somewhere. Send a ping to find out instead of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) * retransmitting data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) if (!retrans) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) rxrpc_reduce_call_timer(call, resend_at, now_j,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) rxrpc_timer_set_for_resend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) spin_unlock_bh(&call->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) ack_ts = ktime_sub(now, call->acks_latest_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) rxrpc_propose_ack_ping_for_lost_ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) rxrpc_send_ack_packet(call, true, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) /* Now go through the Tx window and perform the retransmissions. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) * have to drop the lock for each send. If an ACK comes in whilst the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) * lock is dropped, it may clear some of the retransmission markers for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) * packets that it soft-ACKs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) for (seq = cursor + 1; before_eq(seq, top); seq++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) ix = seq & RXRPC_RXTX_BUFF_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) annotation = call->rxtx_annotations[ix];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) anno_type = annotation & RXRPC_TX_ANNO_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) if (anno_type != RXRPC_TX_ANNO_RETRANS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) /* We need to reset the retransmission state, but we need to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) * so before we drop the lock as a new ACK/NAK may come in and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) * confuse things
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) annotation &= ~RXRPC_TX_ANNO_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) annotation |= RXRPC_TX_ANNO_UNACK | RXRPC_TX_ANNO_RESENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) call->rxtx_annotations[ix] = annotation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) skb = call->rxtx_buffer[ix];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) rxrpc_get_skb(skb, rxrpc_skb_got);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) spin_unlock_bh(&call->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) if (rxrpc_send_data_packet(call, skb, true) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) rxrpc_free_skb(skb, rxrpc_skb_freed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) if (rxrpc_is_client_call(call))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) rxrpc_expose_client_call(call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) rxrpc_free_skb(skb, rxrpc_skb_freed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) spin_lock_bh(&call->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) if (after(call->tx_hard_ack, seq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) seq = call->tx_hard_ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) spin_unlock_bh(&call->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) _leave("");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) * Handle retransmission and deferred ACK/abort generation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) */
/*
 * Handle retransmission and deferred ACK/abort generation.
 *
 * This is the call's background processor, run from a work item
 * (container_of() on call->processor).  It compares each of the call's
 * timeouts against the current time, converts expired ones into event
 * bits, services the pending events (abort, expiry, lost-ACK ping, ACK,
 * ping, resend), then restarts the call timer for the earliest remaining
 * deadline.  The loop is bounded; if work remains, the call is requeued.
 */
void rxrpc_process_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, processor);
	rxrpc_serial_t *send_ack;
	unsigned long now, next, t;
	unsigned int iterations = 0;

	rxrpc_see_call(call);

	//printk("\n--------------------\n");
	_enter("{%d,%s,%lx}",
	       call->debug_id, rxrpc_call_states[call->state], call->events);

recheck_state:
	/* Limit the number of times we do this before returning to the manager */
	iterations++;
	if (iterations > 5)
		goto requeue;

	/* Aborts take precedence over everything else. */
	if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
		rxrpc_send_abort_packet(call);
		goto recheck_state;
	}

	/* A completed call needs no more timer service. */
	if (call->state == RXRPC_CALL_COMPLETE) {
		rxrpc_delete_call_timer(call);
		goto out_put;
	}

	/* Work out if any timeouts tripped */
	now = jiffies;
	t = READ_ONCE(call->expect_rx_by);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	/* The request-expected timeout only applies whilst a server call is
	 * still in the request-receive phase.
	 */
	t = READ_ONCE(call->expect_req_by);
	if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
	    time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	t = READ_ONCE(call->expect_term_by);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	/* For each soft timeout below: push the deadline far into the future
	 * (cmpxchg so a concurrent rearm isn't clobbered) and raise the
	 * corresponding event bit for servicing further down.
	 */
	t = READ_ONCE(call->ack_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
		cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_ACK, &call->events);
	}

	t = READ_ONCE(call->ack_lost_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
		cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
	}

	t = READ_ONCE(call->keepalive_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
		cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, true,
				  rxrpc_propose_ack_ping_for_keepalive);
		set_bit(RXRPC_CALL_EV_PING, &call->events);
	}

	t = READ_ONCE(call->ping_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
		cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_PING, &call->events);
	}

	t = READ_ONCE(call->resend_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
		cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_RESEND, &call->events);
	}

	/* Process events */
	if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
		/* If the peer has been heard from more recently than we
		 * have (its serial is ahead of the last one we saw on this
		 * call), treat this as a reset rather than a plain timeout.
		 */
		if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
		    (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
			trace_rxrpc_call_reset(call);
			rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ECONNRESET);
		} else {
			rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
		}
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		goto recheck_state;
	}

	/* A lost-ACK ping wants the resulting serial number recorded in
	 * acks_lost_ping; pass that slot down to the ACK transmitter.
	 */
	send_ack = NULL;
	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
		call->acks_lost_top = call->tx_top;
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
				  rxrpc_propose_ack_ping_for_lost_ack);
		send_ack = &call->acks_lost_ping;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
	    send_ack) {
		/* Only transmit if there's actually an ACK reason pending. */
		if (call->ackr_reason) {
			rxrpc_send_ack_packet(call, false, send_ack);
			goto recheck_state;
		}
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
		rxrpc_send_ack_packet(call, true, NULL);
		goto recheck_state;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) {
		rxrpc_resend(call, now);
		goto recheck_state;
	}

	/* Make sure the timer is restarted */
	next = call->expect_rx_by;

	/* Fold each deadline into 'next', keeping the earliest. */
#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }

	set(call->expect_req_by);
	set(call->expect_term_by);
	set(call->ack_at);
	set(call->ack_lost_at);
	set(call->resend_at);
	set(call->keepalive_at);
	set(call->ping_at);

	/* If the earliest deadline has already passed, go round again
	 * rather than arming a timer in the past.
	 */
	now = jiffies;
	if (time_after_eq(now, next))
		goto recheck_state;

	rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);

	/* other events may have been raised since we started checking */
	if (call->events && call->state < RXRPC_CALL_COMPLETE)
		goto requeue;

out_put:
	rxrpc_put_call(call, rxrpc_call_put);
out:
	_leave("");
	return;

requeue:
	__rxrpc_queue_call(call);
	goto out;
}