// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC packet transmission
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

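/*
 * Buffer used to build an outgoing ACK.  It mirrors what goes on the wire:
 * the wire header, the ACK header, up to 255 soft-ACK bytes plus three
 * trailing pad bytes (filled in by rxrpc_fill_out_ack()), followed by the
 * ackinfo trailer, which is sent from a second kvec in
 * rxrpc_send_ack_packet().
 */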
struct rxrpc_ack_buffer {
	struct rxrpc_wire_header whdr;
	struct rxrpc_ackpacket ack;
	u8 acks[255];
	u8 pad[3];
	struct rxrpc_ackinfo ackinfo;
};

struct rxrpc_abort_buffer {
	struct rxrpc_wire_header whdr;
	__be32 abort_code;
};

static const char rxrpc_keepalive_string[] = "";

/*
 * Increase Tx backoff on transmission failure and clear it on success.
 */
static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
{
	if (ret < 0) {
		u16 tx_backoff = READ_ONCE(call->tx_backoff);

		if (tx_backoff < HZ)
			WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
	} else {
		WRITE_ONCE(call->tx_backoff, 0);
	}
}

/*
 * Arrange for a keepalive ping a certain time after we last transmitted. This
 * lets the far side know we're still interested in this call and helps keep
 * the route through any intervening firewall open.
 *
 * Receiving a response to the ping will prevent the ->expect_rx_by timer from
 * expiring.
 */
static void rxrpc_set_keepalive(struct rxrpc_call *call)
{
	unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;

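	/* The ping is scheduled at a sixth of the expect-rx timeout,
	 * presumably so that several keepalives can be attempted before
	 * ->expect_rx_by would fire.
	 */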
	keepalive_at += now;
	WRITE_ONCE(call->keepalive_at, keepalive_at);
	rxrpc_reduce_call_timer(call, keepalive_at, now,
				rxrpc_timer_set_for_keepalive);
}

/*
 * Fill out an ACK packet.
 */
static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
				 struct rxrpc_call *call,
				 struct rxrpc_ack_buffer *pkt,
				 rxrpc_seq_t *_hard_ack,
				 rxrpc_seq_t *_top,
				 u8 reason)
{
	rxrpc_serial_t serial;
	rxrpc_seq_t hard_ack, top, seq;
	int ix;
	u32 mtu, jmax;
	u8 *ackp = pkt->acks;

	/* Barrier against rxrpc_input_data(). */
	serial = call->ackr_serial;
	hard_ack = READ_ONCE(call->rx_hard_ack);
	top = smp_load_acquire(&call->rx_top);
	*_hard_ack = hard_ack;
	*_top = top;

	pkt->ack.bufferSpace = htons(8);
	pkt->ack.maxSkew = htons(0);
	pkt->ack.firstPacket = htonl(hard_ack + 1);
	pkt->ack.previousPacket = htonl(call->ackr_prev_seq);
	pkt->ack.serial = htonl(serial);
	pkt->ack.reason = reason;
	pkt->ack.nAcks = top - hard_ack;

	if (reason == RXRPC_ACK_PING)
		pkt->whdr.flags |= RXRPC_REQUEST_ACK;

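	/* Walk the receive window from hard_ack + 1 to top, emitting a
	 * soft-ACK for each slot that holds a packet and a soft-NACK for
	 * each gap.
	 */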
	if (after(top, hard_ack)) {
		seq = hard_ack + 1;
		do {
			ix = seq & RXRPC_RXTX_BUFF_MASK;
			if (call->rxtx_buffer[ix])
				*ackp++ = RXRPC_ACK_TYPE_ACK;
			else
				*ackp++ = RXRPC_ACK_TYPE_NACK;
			seq++;
		} while (before_eq(seq, top));
	}

	mtu = conn->params.peer->if_mtu;
	mtu -= conn->params.peer->hdrsize;
	jmax = (call->nr_jumbo_bad > 3) ? 1 : rxrpc_rx_jumbo_max;
	pkt->ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
	pkt->ackinfo.maxMTU = htonl(mtu);
	pkt->ackinfo.rwind = htonl(call->rx_winsize);
	pkt->ackinfo.jumbo_max = htonl(jmax);

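	/* Pad the soft-ACK table out by three bytes; the pad is included in
	 * the length returned to the caller, and the ackinfo trailer follows
	 * it on the wire.
	 */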
	*ackp++ = 0;
	*ackp++ = 0;
	*ackp++ = 0;
	return top - hard_ack + 3;
}

/*
 * Record the beginning of an RTT probe.
 */
static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
				 enum rxrpc_rtt_tx_trace why)
{
	unsigned long avail = call->rtt_avail;
	int rtt_slot = 9;

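	/* call->rtt_avail carries two bitmaps: the low bits mark free probe
	 * slots, and the bits from RXRPC_CALL_RTT_PEND_SHIFT upwards mark
	 * probes that have been sent and are awaiting a response.
	 */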
	if (!(avail & RXRPC_CALL_RTT_AVAIL_MASK))
		goto no_slot;

	rtt_slot = __ffs(avail & RXRPC_CALL_RTT_AVAIL_MASK);
	if (!test_and_clear_bit(rtt_slot, &call->rtt_avail))
		goto no_slot;

	call->rtt_serial[rtt_slot] = serial;
	call->rtt_sent_at[rtt_slot] = ktime_get_real();
	smp_wmb(); /* Write data before avail bit */
	set_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);

	trace_rxrpc_rtt_tx(call, why, rtt_slot, serial);
	return rtt_slot;

no_slot:
	trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_no_slot, rtt_slot, serial);
	return -1;
}

/*
 * Cancel an RTT probe.
 */
static void rxrpc_cancel_rtt_probe(struct rxrpc_call *call,
				   rxrpc_serial_t serial, int rtt_slot)
{
	if (rtt_slot != -1) {
		clear_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
		smp_wmb(); /* Clear pending bit before setting slot */
		set_bit(rtt_slot, &call->rtt_avail);
		trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_cancel, rtt_slot, serial);
	}
}

/*
 * Send an ACK call packet.
 */
int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
			  rxrpc_serial_t *_serial)
{
	struct rxrpc_connection *conn;
	struct rxrpc_ack_buffer *pkt;
	struct msghdr msg;
	struct kvec iov[2];
	rxrpc_serial_t serial;
	rxrpc_seq_t hard_ack, top;
	size_t len, n;
	int ret, rtt_slot = -1;
	u8 reason;

	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		return -ECONNRESET;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	conn = call->conn;

	msg.msg_name = &call->peer->srx.transport;
	msg.msg_namelen = call->peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	pkt->whdr.epoch = htonl(conn->proto.epoch);
	pkt->whdr.cid = htonl(call->cid);
	pkt->whdr.callNumber = htonl(call->call_id);
	pkt->whdr.seq = 0;
	pkt->whdr.type = RXRPC_PACKET_TYPE_ACK;
	pkt->whdr.flags = RXRPC_SLOW_START_OK | conn->out_clientflag;
	pkt->whdr.userStatus = 0;
	pkt->whdr.securityIndex = call->security_ix;
	pkt->whdr._rsvd = 0;
	pkt->whdr.serviceId = htons(call->service_id);

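	/* Note that call->ackr_reason is consumed (read and then cleared)
	 * under the call lock, and rxrpc_fill_out_ack() runs under the same
	 * lock so that it sees a consistent snapshot of the receive state.
	 */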
	spin_lock_bh(&call->lock);
	if (ping) {
		reason = RXRPC_ACK_PING;
	} else {
		reason = call->ackr_reason;
		if (!call->ackr_reason) {
			spin_unlock_bh(&call->lock);
			ret = 0;
			goto out;
		}
		call->ackr_reason = 0;
	}
	n = rxrpc_fill_out_ack(conn, call, pkt, &hard_ack, &top, reason);

	spin_unlock_bh(&call->lock);

	iov[0].iov_base = pkt;
	iov[0].iov_len = sizeof(pkt->whdr) + sizeof(pkt->ack) + n;
	iov[1].iov_base = &pkt->ackinfo;
	iov[1].iov_len = sizeof(pkt->ackinfo);
	len = iov[0].iov_len + iov[1].iov_len;

	serial = atomic_inc_return(&conn->serial);
	pkt->whdr.serial = htonl(serial);
	trace_rxrpc_tx_ack(call->debug_id, serial,
			   ntohl(pkt->ack.firstPacket),
			   ntohl(pkt->ack.serial),
			   pkt->ack.reason, pkt->ack.nAcks);
	if (_serial)
		*_serial = serial;

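	/* A ping ACK has RXRPC_REQUEST_ACK set (see rxrpc_fill_out_ack()), so
	 * claim an RTT probe slot for it; the probe is cancelled further down
	 * if the sendmsg fails.
	 */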
	if (ping)
		rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping);

	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
	conn->params.peer->last_tx_at = ktime_get_seconds();
	if (ret < 0)
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_ack);
	else
		trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr,
				      rxrpc_tx_point_call_ack);
	rxrpc_tx_backoff(call, ret);

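	/* If the transmission failed, re-propose the ACK so that it can be
	 * retried; otherwise record how much of the window has now been
	 * reported to the peer (presumably so that redundant ACKs can be
	 * avoided later).
	 */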
	if (call->state < RXRPC_CALL_COMPLETE) {
		if (ret < 0) {
			rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
			rxrpc_propose_ACK(call, pkt->ack.reason,
					  ntohl(pkt->ack.serial),
					  false, true,
					  rxrpc_propose_ack_retry_tx);
		} else {
			spin_lock_bh(&call->lock);
			if (after(hard_ack, call->ackr_consumed))
				call->ackr_consumed = hard_ack;
			if (after(top, call->ackr_seen))
				call->ackr_seen = top;
			spin_unlock_bh(&call->lock);
		}

		rxrpc_set_keepalive(call);
	}

out:
	kfree(pkt);
	return ret;
}

/*
 * Send an ABORT call packet.
 */
int rxrpc_send_abort_packet(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn;
	struct rxrpc_abort_buffer pkt;
	struct msghdr msg;
	struct kvec iov[1];
	rxrpc_serial_t serial;
	int ret;

	/* Don't bother sending aborts for a client call once the server has
	 * hard-ACK'd all of its request data. After that point, we're not
	 * going to stop the operation proceeding, and whilst we might limit
	 * the reply, it's not worth it if we can send a new call on the same
	 * channel instead, thereby closing off this call.
	 */
	if (rxrpc_is_client_call(call) &&
	    test_bit(RXRPC_CALL_TX_LAST, &call->flags))
		return 0;

	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		return -ECONNRESET;

	conn = call->conn;

	msg.msg_name = &call->peer->srx.transport;
	msg.msg_namelen = call->peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	pkt.whdr.epoch = htonl(conn->proto.epoch);
	pkt.whdr.cid = htonl(call->cid);
	pkt.whdr.callNumber = htonl(call->call_id);
	pkt.whdr.seq = 0;
	pkt.whdr.type = RXRPC_PACKET_TYPE_ABORT;
	pkt.whdr.flags = conn->out_clientflag;
	pkt.whdr.userStatus = 0;
	pkt.whdr.securityIndex = call->security_ix;
	pkt.whdr._rsvd = 0;
	pkt.whdr.serviceId = htons(call->service_id);
	pkt.abort_code = htonl(call->abort_code);

	iov[0].iov_base = &pkt;
	iov[0].iov_len = sizeof(pkt);

	serial = atomic_inc_return(&conn->serial);
	pkt.whdr.serial = htonl(serial);

	ret = kernel_sendmsg(conn->params.local->socket,
			     &msg, iov, 1, sizeof(pkt));
	conn->params.peer->last_tx_at = ktime_get_seconds();
	if (ret < 0)
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_abort);
	else
		trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
				      rxrpc_tx_point_call_abort);
	rxrpc_tx_backoff(call, ret);
	return ret;
}

/*
 * Send a DATA packet through the transport endpoint.
 */
int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
			   bool retrans)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_wire_header whdr;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct msghdr msg;
	struct kvec iov[2];
	rxrpc_serial_t serial;
	size_t len;
	int ret, rtt_slot = -1;

	_enter(",{%d}", skb->len);

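	/* Make sure the call is on its peer's error-target list so that
	 * errors reported against the peer (e.g. from ICMP) get propagated
	 * to this call.
	 */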
	if (hlist_unhashed(&call->error_link)) {
		spin_lock_bh(&call->peer->lock);
		hlist_add_head_rcu(&call->error_link, &call->peer->error_targets);
		spin_unlock_bh(&call->peer->lock);
	}

	/* Each transmission of a Tx packet needs a new serial number */
	serial = atomic_inc_return(&conn->serial);

	whdr.epoch = htonl(conn->proto.epoch);
	whdr.cid = htonl(call->cid);
	whdr.callNumber = htonl(call->call_id);
	whdr.seq = htonl(sp->hdr.seq);
	whdr.serial = htonl(serial);
	whdr.type = RXRPC_PACKET_TYPE_DATA;
	whdr.flags = sp->hdr.flags;
	whdr.userStatus = 0;
	whdr.securityIndex = call->security_ix;
	whdr._rsvd = htons(sp->hdr._rsvd);
	whdr.serviceId = htons(call->service_id);

	if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) &&
	    sp->hdr.seq == 1)
		whdr.userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;

	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = skb->head;
	iov[1].iov_len = skb->len;
	len = iov[0].iov_len + iov[1].iov_len;

	msg.msg_name = &call->peer->srx.transport;
	msg.msg_namelen = call->peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	/* If our RTT cache needs working on, request an ACK. Also request
	 * ACKs if a DATA packet appears to have been lost.
	 *
	 * However, we mustn't request an ACK on the last reply packet of a
	 * service call, lest OpenAFS incorrectly send us an ACK with some
	 * soft-ACKs in it and then never follow up with a proper hard ACK.
	 */
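	/* In short, an ACK is requested here for a lost-ACK event, a
	 * retransmission, slow-start mode, every other packet while we still
	 * have fewer than three RTT samples, or when more than a second has
	 * passed since we last asked for one.
	 */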
	if ((!(sp->hdr.flags & RXRPC_LAST_PACKET) ||
	     rxrpc_to_server(sp)
	     ) &&
	    (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
	     retrans ||
	     call->cong_mode == RXRPC_CALL_SLOW_START ||
	     (call->peer->rtt_count < 3 && sp->hdr.seq & 1) ||
	     ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
			  ktime_get_real())))
		whdr.flags |= RXRPC_REQUEST_ACK;

	if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
		static int lose;
		if ((lose++ & 7) == 7) {
			ret = 0;
			trace_rxrpc_tx_data(call, sp->hdr.seq, serial,
					    whdr.flags, retrans, true);
			goto done;
		}
	}

	trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags, retrans,
			    false);

	/* send the packet with the don't fragment bit set if we currently
	 * think it's small enough */
	if (iov[1].iov_len >= call->peer->maxdata)
		goto send_fragmentable;

	down_read(&conn->params.local->defrag_sem);

	sp->hdr.serial = serial;
	smp_wmb(); /* Set serial before timestamp */
	skb->tstamp = ktime_get_real();
	if (whdr.flags & RXRPC_REQUEST_ACK)
		rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);

	/* send the packet by UDP
	 * - returns -EMSGSIZE if UDP would have to fragment the packet
	 *   to go out of the interface
	 *   - in which case, we'll have processed the ICMP error
	 *     message and updated the peer record
	 */
	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
	conn->params.peer->last_tx_at = ktime_get_seconds();

	up_read(&conn->params.local->defrag_sem);
	if (ret < 0) {
		rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_data_nofrag);
	} else {
		trace_rxrpc_tx_packet(call->debug_id, &whdr,
				      rxrpc_tx_point_call_data_nofrag);
	}

	rxrpc_tx_backoff(call, ret);
	if (ret == -EMSGSIZE)
		goto send_fragmentable;

done:
	if (ret >= 0) {
		if (whdr.flags & RXRPC_REQUEST_ACK) {
			call->peer->rtt_last_req = skb->tstamp;
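			/* If we already have RTT samples, arm a timer for the
			 * point at which the requested ACK should be
			 * considered lost, based on the current RTO backoff.
			 */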
			if (call->peer->rtt_count > 1) {
				unsigned long nowj = jiffies, ack_lost_at;

				ack_lost_at = rxrpc_get_rto_backoff(call->peer, false);
				ack_lost_at += nowj;
				WRITE_ONCE(call->ack_lost_at, ack_lost_at);
				rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
							rxrpc_timer_set_for_lost_ack);
			}
		}

		if (sp->hdr.seq == 1 &&
		    !test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER,
				      &call->flags)) {
			unsigned long nowj = jiffies, expect_rx_by;

			expect_rx_by = nowj + call->next_rx_timo;
			WRITE_ONCE(call->expect_rx_by, expect_rx_by);
			rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
						rxrpc_timer_set_for_normal);
		}

		rxrpc_set_keepalive(call);
	} else {
		/* Cancel the call if the initial transmission fails,
		 * particularly if that's due to network routing issues that
		 * aren't going away anytime soon. The layer above can arrange
		 * the retransmission.
		 */
		if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
			rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
						  RX_USER_ABORT, ret);
	}

	_leave(" = %d [%u]", ret, call->peer->maxdata);
	return ret;

send_fragmentable:
	/* attempt to send this message with fragmentation enabled */
	_debug("send fragment");

	down_write(&conn->params.local->defrag_sem);

	sp->hdr.serial = serial;
	smp_wmb(); /* Set serial before timestamp */
	skb->tstamp = ktime_get_real();
	if (whdr.flags & RXRPC_REQUEST_ACK)
		rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);

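	/* Fragmentation is permitted for this one transmission by switching
	 * the socket to IP_PMTUDISC_DONT around the sendmsg and then back to
	 * IP_PMTUDISC_DO.
	 */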
	switch (conn->params.local->srx.transport.family) {
	case AF_INET6:
	case AF_INET:
		ip_sock_set_mtu_discover(conn->params.local->socket->sk,
					 IP_PMTUDISC_DONT);
		ret = kernel_sendmsg(conn->params.local->socket, &msg,
				     iov, 2, len);
		conn->params.peer->last_tx_at = ktime_get_seconds();

		ip_sock_set_mtu_discover(conn->params.local->socket->sk,
					 IP_PMTUDISC_DO);
		break;

	default:
		BUG();
	}

	if (ret < 0) {
		rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_data_frag);
	} else {
		trace_rxrpc_tx_packet(call->debug_id, &whdr,
				      rxrpc_tx_point_call_data_frag);
	}
	rxrpc_tx_backoff(call, ret);

	up_write(&conn->params.local->defrag_sem);
	goto done;
}

/*
 * reject packets through the local endpoint
 */
void rxrpc_reject_packets(struct rxrpc_local *local)
{
	struct sockaddr_rxrpc srx;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_wire_header whdr;
	struct sk_buff *skb;
	struct msghdr msg;
	struct kvec iov[2];
	size_t size;
	__be32 code;
	int ret, ioc;

	_enter("%d", local->debug_id);

	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = &code;
	iov[1].iov_len = sizeof(code);

	msg.msg_name = &srx.transport;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	memset(&whdr, 0, sizeof(whdr));

	while ((skb = skb_dequeue(&local->reject_queue))) {
		rxrpc_see_skb(skb, rxrpc_skb_seen);
		sp = rxrpc_skb(skb);

		switch (skb->mark) {
		case RXRPC_SKB_MARK_REJECT_BUSY:
			whdr.type = RXRPC_PACKET_TYPE_BUSY;
			size = sizeof(whdr);
			ioc = 1;
			break;
		case RXRPC_SKB_MARK_REJECT_ABORT:
			whdr.type = RXRPC_PACKET_TYPE_ABORT;
			code = htonl(skb->priority);
			size = sizeof(whdr) + sizeof(code);
			ioc = 2;
			break;
		default:
			rxrpc_free_skb(skb, rxrpc_skb_freed);
			continue;
		}

		if (rxrpc_extract_addr_from_skb(&srx, skb) == 0) {
			msg.msg_namelen = srx.transport_len;

			whdr.epoch = htonl(sp->hdr.epoch);
			whdr.cid = htonl(sp->hdr.cid);
			whdr.callNumber = htonl(sp->hdr.callNumber);
			whdr.serviceId = htons(sp->hdr.serviceId);
			whdr.flags = sp->hdr.flags;
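			/* Keep only the client-initiated flag, inverted, so
			 * that the reply is marked as travelling in the
			 * opposite direction to the packet being rejected.
			 */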
			whdr.flags ^= RXRPC_CLIENT_INITIATED;
			whdr.flags &= RXRPC_CLIENT_INITIATED;

			ret = kernel_sendmsg(local->socket, &msg,
					     iov, ioc, size);
			if (ret < 0)
				trace_rxrpc_tx_fail(local->debug_id, 0, ret,
						    rxrpc_tx_point_reject);
			else
				trace_rxrpc_tx_packet(local->debug_id, &whdr,
						      rxrpc_tx_point_reject);
		}

		rxrpc_free_skb(skb, rxrpc_skb_freed);
	}

	_leave("");
}

/*
 * Send a VERSION reply to a peer as a keepalive.
 */
void rxrpc_send_keepalive(struct rxrpc_peer *peer)
{
	struct rxrpc_wire_header whdr;
	struct msghdr msg;
	struct kvec iov[2];
	size_t len;
	int ret;

	_enter("");

	msg.msg_name = &peer->srx.transport;
	msg.msg_namelen = peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	whdr.epoch = htonl(peer->local->rxnet->epoch);
	whdr.cid = 0;
	whdr.callNumber = 0;
	whdr.seq = 0;
	whdr.serial = 0;
	whdr.type = RXRPC_PACKET_TYPE_VERSION; /* Not client-initiated */
	whdr.flags = RXRPC_LAST_PACKET;
	whdr.userStatus = 0;
	whdr.securityIndex = 0;
	whdr._rsvd = 0;
	whdr.serviceId = 0;

	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = (char *)rxrpc_keepalive_string;
	iov[1].iov_len = sizeof(rxrpc_keepalive_string);
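	/* The payload is just the NUL of the empty keepalive string; a tiny
	 * VERSION packet is sufficient to keep the route through any
	 * intervening firewall open (see rxrpc_set_keepalive() above).
	 */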

	len = iov[0].iov_len + iov[1].iov_len;

	_proto("Tx VERSION (keepalive)");

	ret = kernel_sendmsg(peer->local->socket, &msg, iov, 2, len);
	if (ret < 0)
		trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
				    rxrpc_tx_point_version_keepalive);
	else
		trace_rxrpc_tx_packet(peer->debug_id, &whdr,
				      rxrpc_tx_point_version_keepalive);

	peer->last_tx_at = ktime_get_seconds();
	_leave("");
}