Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5, 5B and 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /* RxRPC packet reception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Written by David Howells (dhowells@redhat.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/net.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/errqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/udp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/in.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/in6.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/icmp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <net/sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <net/af_rxrpc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <net/ip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <net/udp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <net/net_namespace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include "ar-internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) static void rxrpc_proto_abort(const char *why,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) 			      struct rxrpc_call *call, rxrpc_seq_t seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) 	if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, -EBADMSG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) 		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) 		rxrpc_queue_call(call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) 
/*
 * Do TCP-style congestion management [RFC 5681].
 *
 * Drive the per-call congestion state machine (call->cong_mode) from the
 * summary of a received ACK: slow start, congestion avoidance, packet-loss
 * detection and fast retransmit/recovery.  The window here is counted in
 * packets, not bytes as in TCP.
 */
static void rxrpc_congestion_management(struct rxrpc_call *call,
					struct sk_buff *skb,
					struct rxrpc_ack_summary *summary,
					rxrpc_serial_t acked_serial)
{
	enum rxrpc_congest_change change = rxrpc_cong_no_change;
	unsigned int cumulative_acks = call->cong_cumul_acks;
	unsigned int cwnd = call->cong_cwnd;
	bool resend = false;

	/* Packets still in flight: everything transmitted beyond the hard-ACK
	 * point, less what this ACK accounted for.
	 */
	summary->flight_size =
		(call->tx_top - call->tx_hard_ack) - summary->nr_acks;

	/* A retransmission timeout halves ssthresh (floor of 2) and drops the
	 * window back to 1 packet [RFC 5681 3.1].
	 */
	if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
		summary->retrans_timeo = true;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = 1;
		if (cwnd >= call->cong_ssthresh &&
		    call->cong_mode == RXRPC_CALL_SLOW_START) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
			cumulative_acks = 0;
		}
	}

	/* Accumulate newly acknowledged packets, saturating the running count
	 * at 255.
	 */
	cumulative_acks += summary->nr_new_acks;
	cumulative_acks += summary->nr_rot_new_acks;
	if (cumulative_acks > 255)
		cumulative_acks = 255;

	/* Snapshot the congestion state for the tracepoint emitted below. */
	summary->mode = call->cong_mode;
	summary->cwnd = call->cong_cwnd;
	summary->ssthresh = call->cong_ssthresh;
	summary->cumulative_acks = cumulative_acks;
	summary->dup_acks = call->cong_dup_acks;

	switch (call->cong_mode) {
	case RXRPC_CALL_SLOW_START:
		if (summary->nr_nacks > 0)
			goto packet_loss_detected;
		/* Bump the window by one packet whilst ACKs keep arriving;
		 * switch to congestion avoidance once ssthresh is reached.
		 */
		if (summary->cumulative_acks > 0)
			cwnd += 1;
		if (cwnd >= call->cong_ssthresh) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
		}
		goto out;

	case RXRPC_CALL_CONGEST_AVOIDANCE:
		if (summary->nr_nacks > 0)
			goto packet_loss_detected;

		/* We analyse the number of packets that get ACK'd per RTT
		 * period and increase the window if we managed to fill it.
		 */
		if (call->peer->rtt_count == 0)
			goto out;
		if (ktime_before(skb->tstamp,
				 ktime_add_us(call->cong_tstamp,
					      call->peer->srtt_us >> 3)))
			goto out_no_clear_ca;
		change = rxrpc_cong_rtt_window_end;
		call->cong_tstamp = skb->tstamp;
		if (cumulative_acks >= cwnd)
			cwnd++;
		goto out;

	case RXRPC_CALL_PACKET_LOSS:
		if (summary->nr_nacks == 0)
			goto resume_normality;

		/* A new lowest NACK restarts the duplicate-ACK count. */
		if (summary->new_low_nack) {
			change = rxrpc_cong_new_low_nack;
			call->cong_dup_acks = 1;
			if (call->cong_extra > 1)
				call->cong_extra = 1;
			goto send_extra_data;
		}

		call->cong_dup_acks++;
		if (call->cong_dup_acks < 3)
			goto send_extra_data;

		/* Three duplicate ACKs: enter fast retransmit [RFC 5681 3.2].
		 */
		change = rxrpc_cong_begin_retransmission;
		call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = call->cong_ssthresh + 3;
		call->cong_extra = 0;
		call->cong_dup_acks = 0;
		resend = true;
		goto out;

	case RXRPC_CALL_FAST_RETRANSMIT:
		if (!summary->new_low_nack) {
			if (summary->nr_new_acks == 0)
				cwnd += 1;
			call->cong_dup_acks++;
			if (call->cong_dup_acks == 2) {
				change = rxrpc_cong_retransmit_again;
				call->cong_dup_acks = 0;
				resend = true;
			}
		} else {
			/* Retransmission made progress: deflate the window to
			 * ssthresh and resume normality once the NACKs clear.
			 */
			change = rxrpc_cong_progress;
			cwnd = call->cong_ssthresh;
			if (summary->nr_nacks == 0)
				goto resume_normality;
		}
		goto out;

	default:
		BUG();
		goto out;
	}

resume_normality:
	/* All previously NACK'd packets have been ACK'd; pick the mode that
	 * matches the current window size.
	 */
	change = rxrpc_cong_cleared_nacks;
	call->cong_dup_acks = 0;
	call->cong_extra = 0;
	call->cong_tstamp = skb->tstamp;
	if (cwnd < call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_SLOW_START;
	else
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
out:
	cumulative_acks = 0;
out_no_clear_ca:
	/* Clamp the window to the Tx ring capacity and write back the state;
	 * queue the call if a resend is now required.
	 */
	if (cwnd >= RXRPC_RXTX_BUFF_SIZE - 1)
		cwnd = RXRPC_RXTX_BUFF_SIZE - 1;
	call->cong_cwnd = cwnd;
	call->cong_cumul_acks = cumulative_acks;
	trace_rxrpc_congest(call, summary, acked_serial, change);
	if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_queue_call(call);
	return;

packet_loss_detected:
	change = rxrpc_cong_saw_nack;
	call->cong_mode = RXRPC_CALL_PACKET_LOSS;
	call->cong_dup_acks = 0;
	goto send_extra_data;

send_extra_data:
	/* Send some previously unsent DATA if we have some to advance the ACK
	 * state.
	 */
	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
	    RXRPC_TX_ANNO_LAST ||
	    summary->nr_acks != call->tx_top - call->tx_hard_ack) {
		call->cong_extra++;
		wake_up(&call->waitq);
	}
	goto out_no_clear_ca;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 
/*
 * Apply a hard ACK by advancing the Tx window.
 *
 * Rotate acknowledged packets, up to and including sequence @to, out of the
 * Tx ring under call->lock, then free them after the lock is dropped.
 * Returns true if the packet marked as the last of the call was among those
 * rotated out.
 */
static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
				   struct rxrpc_ack_summary *summary)
{
	struct sk_buff *skb, *list = NULL;
	bool rot_last = false;
	int ix;
	u8 annotation;

	/* Track the lowest NACK'd sequence; note for the caller when the hard
	 * ACK has advanced past a previously recorded low NACK.
	 */
	if (call->acks_lowest_nak == call->tx_hard_ack) {
		call->acks_lowest_nak = to;
	} else if (before_eq(call->acks_lowest_nak, to)) {
		summary->new_low_nack = true;
		call->acks_lowest_nak = to;
	}

	spin_lock(&call->lock);

	while (before(call->tx_hard_ack, to)) {
		call->tx_hard_ack++;
		ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
		skb = call->rxtx_buffer[ix];
		annotation = call->rxtx_annotations[ix];
		rxrpc_see_skb(skb, rxrpc_skb_rotated);
		call->rxtx_buffer[ix] = NULL;
		call->rxtx_annotations[ix] = 0;
		/* Chain the skb onto a private list so it can be freed once
		 * the lock has been released.
		 */
		skb->next = list;
		list = skb;

		if (annotation & RXRPC_TX_ANNO_LAST) {
			set_bit(RXRPC_CALL_TX_LAST, &call->flags);
			rot_last = true;
		}
		/* Anything not already soft-ACK'd counts as newly ACK'd by
		 * this rotation.
		 */
		if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
			summary->nr_rot_new_acks++;
	}

	spin_unlock(&call->lock);

	trace_rxrpc_transmit(call, (rot_last ?
				    rxrpc_transmit_rotate_last :
				    rxrpc_transmit_rotate));
	/* Wake anyone waiting for space in the Tx window. */
	wake_up(&call->waitq);

	/* Free the rotated packets outside the lock. */
	while (list) {
		skb = list;
		list = skb->next;
		skb_mark_not_on_list(skb);
		rxrpc_free_skb(skb, rxrpc_skb_freed);
	}

	return rot_last;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 
/*
 * End the transmission phase of a call.
 *
 * This occurs when we get an ACKALL packet, the first DATA packet of a reply,
 * or a final ACK packet.
 *
 * Returns true if the call state was advanced, or false - after raising a
 * protocol abort - if the call was in a state from which the Tx phase cannot
 * end.
 */
static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
			       const char *abort_why)
{
	unsigned int state;

	/* The last Tx packet must already have been rotated out. */
	ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));

	write_lock(&call->state_lock);

	state = call->state;
	switch (state) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		/* Client side: move on to receiving the reply, or await it if
		 * it hasn't begun arriving yet.
		 */
		if (reply_begun)
			call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
		else
			call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
		break;

	case RXRPC_CALL_SERVER_AWAIT_ACK:
		/* Server side: the final ACK arrived - complete the call. */
		__rxrpc_call_completed(call);
		state = call->state;
		break;

	default:
		goto bad_state;
	}

	write_unlock(&call->state_lock);
	if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
		trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
	else
		trace_rxrpc_transmit(call, rxrpc_transmit_end);
	_leave(" = ok");
	return true;

bad_state:
	write_unlock(&call->state_lock);
	kdebug("end_tx %s", rxrpc_call_states[call->state]);
	rxrpc_proto_abort(abort_why, call, call->tx_top);
	return false;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 
/*
 * Begin the reply reception phase of a call.
 *
 * The arrival of reply DATA implicitly hard-ACKs the entire request, so any
 * pending ACK generation is cancelled and the resend/ack timers are pushed
 * out, the remaining request packets are rotated out of the Tx window and the
 * Tx phase is ended.  Returns false if a protocol abort was raised.
 */
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
	struct rxrpc_ack_summary summary = { 0 };
	unsigned long now, timo;
	rxrpc_seq_t top = READ_ONCE(call->tx_top);

	if (call->ackr_reason) {
		/* Discard the deferred ACK and defer the resend and ack
		 * timers as far as possible.
		 */
		spin_lock_bh(&call->lock);
		call->ackr_reason = 0;
		spin_unlock_bh(&call->lock);
		now = jiffies;
		timo = now + MAX_JIFFY_OFFSET;
		WRITE_ONCE(call->resend_at, timo);
		WRITE_ONCE(call->ack_at, timo);
		trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
	}

	/* Rotate out any request packets not yet hard-ACK'd. */
	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
		if (!rxrpc_rotate_tx_window(call, top, &summary)) {
			rxrpc_proto_abort("TXL", call, top);
			return false;
		}
	}
	if (!rxrpc_end_tx_phase(call, true, "ETD"))
		return false;
	call->tx_phase = false;
	return true;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333)  * Scan a data packet to validate its structure and to work out how many
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334)  * subpackets it contains.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336)  * A jumbo packet is a collection of consecutive packets glued together with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337)  * little headers between that indicate how to change the initial header for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338)  * each subpacket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340)  * RXRPC_JUMBO_PACKET must be set on all but the last subpacket - and all but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341)  * the last are RXRPC_JUMBO_DATALEN in size.  The last subpacket may be of any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342)  * size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) static bool rxrpc_validate_data(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	unsigned int offset = sizeof(struct rxrpc_wire_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 	unsigned int len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 	u8 flags = sp->hdr.flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 		if (flags & RXRPC_REQUEST_ACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 			__set_bit(sp->nr_subpackets, sp->rx_req_ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 		sp->nr_subpackets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 		if (!(flags & RXRPC_JUMBO_PACKET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 		if (len - offset < RXRPC_JUMBO_SUBPKTLEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 			goto protocol_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 		if (flags & RXRPC_LAST_PACKET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 			goto protocol_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 		offset += RXRPC_JUMBO_DATALEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 		if (skb_copy_bits(skb, offset, &flags, 1) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 			goto protocol_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 		offset += sizeof(struct rxrpc_jumbo_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 	if (flags & RXRPC_LAST_PACKET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 		sp->rx_flags |= RXRPC_SKB_INCL_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) protocol_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378)  * Handle reception of a duplicate packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380)  * We have to take care to avoid an attack here whereby we're given a series of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381)  * jumbograms, each with a sequence number one before the preceding one and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382)  * filled up to maximum UDP size.  If they never send us the first packet in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383)  * the sequence, they can cause us to have to hold on to around 2MiB of kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384)  * space until the call times out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386)  * We limit the space usage by only accepting three duplicate jumbo packets per
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387)  * call.  After that, we tell the other side we're no longer accepting jumbos
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388)  * (that information is encoded in the ACK packet).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 				 bool is_jumbo, bool *_jumbo_bad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	/* Discard normal packets that are duplicates. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	if (is_jumbo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	/* Skip jumbo subpackets that are duplicates.  When we've had three or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 	 * more partially duplicate jumbo packets, we refuse to take any more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	 * jumbos for this call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	if (!*_jumbo_bad) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 		call->nr_jumbo_bad++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 		*_jumbo_bad = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408)  * Process a DATA packet, adding the packet to the Rx ring.  The caller's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409)  * packet ref must be passed on or discarded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	enum rxrpc_call_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	unsigned int j, nr_subpackets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 	bool immediate_ack = false, jumbo_bad = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	u8 ack = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	_enter("{%u,%u},{%u,%u}",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	       call->rx_hard_ack, call->rx_top, skb->len, seq0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 	_proto("Rx DATA %%%u { #%u f=%02x n=%u }",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	       sp->hdr.serial, seq0, sp->hdr.flags, sp->nr_subpackets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	state = READ_ONCE(call->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	if (state >= RXRPC_CALL_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 		rxrpc_free_skb(skb, rxrpc_skb_freed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	if (state == RXRPC_CALL_SERVER_RECV_REQUEST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 		unsigned long timo = READ_ONCE(call->next_req_timo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 		unsigned long now, expect_req_by;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 		if (timo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 			now = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 			expect_req_by = now + timo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 			WRITE_ONCE(call->expect_req_by, expect_req_by);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 			rxrpc_reduce_call_timer(call, expect_req_by, now,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 						rxrpc_timer_set_for_idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	spin_lock(&call->input_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	/* Received data implicitly ACKs all of the request packets we sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	 * when we're acting as a client.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	     state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	    !rxrpc_receiving_reply(call))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	call->ackr_prev_seq = seq0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	hard_ack = READ_ONCE(call->rx_hard_ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	nr_subpackets = sp->nr_subpackets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	if (nr_subpackets > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 		if (call->nr_jumbo_bad > 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 			ack = RXRPC_ACK_NOSPACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 			ack_serial = serial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 			goto ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	for (j = 0; j < nr_subpackets; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 		rxrpc_serial_t serial = sp->hdr.serial + j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 		rxrpc_seq_t seq = seq0 + j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		unsigned int ix = seq & RXRPC_RXTX_BUFF_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 		bool terminal = (j == nr_subpackets - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 		bool last = terminal && (sp->rx_flags & RXRPC_SKB_INCL_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 		u8 flags, annotation = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 		_proto("Rx DATA+%u %%%u { #%x t=%u l=%u }",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 		     j, serial, seq, terminal, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 		if (last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 			if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 			    seq != call->rx_top) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 				rxrpc_proto_abort("LSN", call, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 				goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 			if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 			    after_eq(seq, call->rx_top)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 				rxrpc_proto_abort("LSA", call, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 				goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 		flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 		if (last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 			flags |= RXRPC_LAST_PACKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 		if (!terminal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 			flags |= RXRPC_JUMBO_PACKET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 		if (test_bit(j, sp->rx_req_ack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 			flags |= RXRPC_REQUEST_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 		if (before_eq(seq, hard_ack)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 			ack = RXRPC_ACK_DUPLICATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 			ack_serial = serial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		if (call->rxtx_buffer[ix]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 			rxrpc_input_dup_data(call, seq, nr_subpackets > 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 					     &jumbo_bad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 			if (ack != RXRPC_ACK_DUPLICATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 				ack = RXRPC_ACK_DUPLICATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 				ack_serial = serial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 			immediate_ack = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 		if (after(seq, hard_ack + call->rx_winsize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 			ack = RXRPC_ACK_EXCEEDS_WINDOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 			ack_serial = serial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 			if (flags & RXRPC_JUMBO_PACKET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 				if (!jumbo_bad) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 					call->nr_jumbo_bad++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 					jumbo_bad = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 			goto ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		if (flags & RXRPC_REQUEST_ACK && !ack) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 			ack = RXRPC_ACK_REQUESTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 			ack_serial = serial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 		/* Queue the packet.  We use a couple of memory barriers here as need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 		 * to make sure that rx_top is perceived to be set after the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 		 * pointer and that the buffer pointer is set after the annotation and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 		 * the skb data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		 * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		 * and also rxrpc_fill_out_ack().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		if (!terminal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 			rxrpc_get_skb(skb, rxrpc_skb_got);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		call->rxtx_annotations[ix] = annotation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 		call->rxtx_buffer[ix] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 		if (after(seq, call->rx_top)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 			smp_store_release(&call->rx_top, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 		} else if (before(seq, call->rx_top)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 			/* Send an immediate ACK if we fill in a hole */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 			if (!ack) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 				ack = RXRPC_ACK_DELAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 				ack_serial = serial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 			immediate_ack = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 		if (terminal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 			/* From this point on, we're not allowed to touch the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 			 * packet any longer as its ref now belongs to the Rx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 			 * ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 			skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 			sp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 		if (last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 			set_bit(RXRPC_CALL_RX_LAST, &call->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 			if (!ack) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 				ack = RXRPC_ACK_DELAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 				ack_serial = serial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 			trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 			trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 		if (after_eq(seq, call->rx_expect_next)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 			if (after(seq, call->rx_expect_next)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 				_net("OOS %u > %u", seq, call->rx_expect_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 				ack = RXRPC_ACK_OUT_OF_SEQUENCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 				ack_serial = serial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 			call->rx_expect_next = seq + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) ack:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	if (ack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 		rxrpc_propose_ACK(call, ack, ack_serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 				  immediate_ack, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 				  rxrpc_propose_ack_input_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 				  false, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 				  rxrpc_propose_ack_input_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	trace_rxrpc_notify_socket(call->debug_id, serial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	rxrpc_notify_socket(call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	spin_unlock(&call->input_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	rxrpc_free_skb(skb, rxrpc_skb_freed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	_leave(" [queued]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611)  * See if there's a cached RTT probe to complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 				     ktime_t resp_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 				     rxrpc_serial_t acked_serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 				     rxrpc_serial_t ack_serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 				     enum rxrpc_rtt_rx_trace type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	rxrpc_serial_t orig_serial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	unsigned long avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	ktime_t sent_at;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	bool matched = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	avail = READ_ONCE(call->rtt_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	smp_rmb(); /* Read avail bits before accessing data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	for (i = 0; i < ARRAY_SIZE(call->rtt_serial); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		if (!test_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &avail))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		sent_at = call->rtt_sent_at[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		orig_serial = call->rtt_serial[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 		if (orig_serial == acked_serial) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 			clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 			smp_mb(); /* Read data before setting avail bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 			set_bit(i, &call->rtt_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 			if (type != rxrpc_rtt_rx_cancel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 				rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 						   sent_at, resp_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 				trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_cancel, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 						   orig_serial, acked_serial, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 			matched = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 		/* If a later serial is being acked, then mark this slot as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		 * being available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 		if (after(acked_serial, orig_serial)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 			trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_obsolete, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 					   orig_serial, acked_serial, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 			clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 			smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 			set_bit(i, &call->rtt_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	if (!matched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_lost, 9, 0, acked_serial, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665)  * Process the response to a ping that we sent to find out if we lost an ACK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667)  * If we got back a ping response that indicates a lower tx_top than what we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668)  * had at the time of the ping transmission, we adjudge all the DATA packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669)  * sent between the response tx_top and the ping-time tx_top to have been lost.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	rxrpc_seq_t top, bottom, seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	bool resend = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	spin_lock_bh(&call->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	bottom = call->tx_hard_ack + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	top = call->acks_lost_top;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	if (before(bottom, top)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		for (seq = bottom; before_eq(seq, top); seq++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 			int ix = seq & RXRPC_RXTX_BUFF_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 			u8 annotation = call->rxtx_annotations[ix];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 			u8 anno_type = annotation & RXRPC_TX_ANNO_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 			if (anno_type != RXRPC_TX_ANNO_UNACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 			annotation &= ~RXRPC_TX_ANNO_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 			annotation |= RXRPC_TX_ANNO_RETRANS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 			call->rxtx_annotations[ix] = annotation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 			resend = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	spin_unlock_bh(&call->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 		rxrpc_queue_call(call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702)  * Process a ping response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) static void rxrpc_input_ping_response(struct rxrpc_call *call,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 				      ktime_t resp_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 				      rxrpc_serial_t acked_serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 				      rxrpc_serial_t ack_serial)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	if (acked_serial == call->acks_lost_ping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		rxrpc_input_check_for_lost_ack(call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714)  * Process the extra information that may be appended to an ACK packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 				struct rxrpc_ackinfo *ackinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	struct rxrpc_peer *peer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	unsigned int mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	bool wake = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	u32 rwind = ntohl(ackinfo->rwind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	_proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	       sp->hdr.serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	       ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	       rwind, ntohl(ackinfo->jumbo_max));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		rwind = RXRPC_RXTX_BUFF_SIZE - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	if (call->tx_winsize != rwind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		if (rwind > call->tx_winsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 			wake = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		call->tx_winsize = rwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	if (call->cong_ssthresh > rwind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		call->cong_ssthresh = rwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	peer = call->peer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	if (mtu < peer->maxdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 		spin_lock_bh(&peer->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		peer->maxdata = mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 		peer->mtu = mtu + peer->hdrsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		spin_unlock_bh(&peer->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	if (wake)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		wake_up(&call->waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758)  * Process individual soft ACKs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760)  * Each ACK in the array corresponds to one packet and can be either an ACK or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761)  * a NAK.  If we get find an explicitly NAK'd packet we resend immediately;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762)  * packets that lie beyond the end of the ACK list are scheduled for resend by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763)  * the timer on the basis that the peer might just not have processed them at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764)  * the time the ACK was sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 				  rxrpc_seq_t seq, int nr_acks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 				  struct rxrpc_ack_summary *summary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	int ix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	u8 annotation, anno_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	for (; nr_acks > 0; nr_acks--, seq++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		ix = seq & RXRPC_RXTX_BUFF_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		annotation = call->rxtx_annotations[ix];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		anno_type = annotation & RXRPC_TX_ANNO_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		annotation &= ~RXRPC_TX_ANNO_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		switch (*acks++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		case RXRPC_ACK_TYPE_ACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			summary->nr_acks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			if (anno_type == RXRPC_TX_ANNO_ACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 			summary->nr_new_acks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 			call->rxtx_annotations[ix] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 				RXRPC_TX_ANNO_ACK | annotation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		case RXRPC_ACK_TYPE_NACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			if (!summary->nr_nacks &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			    call->acks_lowest_nak != seq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 				call->acks_lowest_nak = seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 				summary->new_low_nack = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 			summary->nr_nacks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 			if (anno_type == RXRPC_TX_ANNO_NAK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 			summary->nr_new_nacks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 			if (anno_type == RXRPC_TX_ANNO_RETRANS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 			call->rxtx_annotations[ix] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 				RXRPC_TX_ANNO_NAK | annotation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			return rxrpc_proto_abort("SFT", call, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809)  * Return true if the ACK is valid - ie. it doesn't appear to have regressed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810)  * with respect to the ack state conveyed by preceding ACKs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 			       rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	rxrpc_seq_t base = READ_ONCE(call->ackr_first_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	if (after(first_pkt, base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		return true; /* The window advanced */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	if (before(first_pkt, base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		return false; /* firstPacket regressed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	if (after_eq(prev_pkt, call->ackr_prev_seq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		return true; /* previousPacket hasn't regressed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	/* Some rx implementations put a serial number in previousPacket. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	if (after_eq(prev_pkt, base + call->tx_winsize))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833)  * Process an ACK packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835)  * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836)  * in the ACK array.  Anything before that is hard-ACK'd and may be discarded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838)  * A hard-ACK means that a packet has been processed and may be discarded; a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839)  * soft-ACK means that the packet may be discarded and retransmission
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840)  * requested.  A phase is complete when all packets are hard-ACK'd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	struct rxrpc_ack_summary summary = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		struct rxrpc_ackpacket ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		struct rxrpc_ackinfo info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		u8 acks[RXRPC_MAXACKS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	} buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	rxrpc_serial_t ack_serial, acked_serial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	int nr_acks, offset, ioffset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	_enter("");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	offset = sizeof(struct rxrpc_wire_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	if (skb_copy_bits(skb, offset, &buf.ack, sizeof(buf.ack)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		_debug("extraction failure");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		return rxrpc_proto_abort("XAK", call, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	offset += sizeof(buf.ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	ack_serial = sp->hdr.serial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	acked_serial = ntohl(buf.ack.serial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	first_soft_ack = ntohl(buf.ack.firstPacket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	prev_pkt = ntohl(buf.ack.previousPacket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	hard_ack = first_soft_ack - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	nr_acks = buf.ack.nAcks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 			      buf.ack.reason : RXRPC_ACK__INVALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	trace_rxrpc_rx_ack(call, ack_serial, acked_serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			   first_soft_ack, prev_pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 			   summary.ack_reason, nr_acks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	switch (buf.ack.reason) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	case RXRPC_ACK_PING_RESPONSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 					  ack_serial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 					 rxrpc_rtt_rx_ping_response);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	case RXRPC_ACK_REQUESTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 					 rxrpc_rtt_rx_requested_ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		if (acked_serial != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 						 rxrpc_rtt_rx_cancel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	if (buf.ack.reason == RXRPC_ACK_PING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		_proto("Rx ACK %%%u PING Request", ack_serial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 				  ack_serial, true, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 				  rxrpc_propose_ack_respond_to_ping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 				  ack_serial, true, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 				  rxrpc_propose_ack_respond_to_ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	/* Discard any out-of-order or duplicate ACKs (outside lock). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 					   first_soft_ack, call->ackr_first_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 					   prev_pkt, call->ackr_prev_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	buf.info.rxMTU = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	ioffset = offset + nr_acks + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	if (skb->len >= ioffset + sizeof(buf.info) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	    skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		return rxrpc_proto_abort("XAI", call, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	spin_lock(&call->input_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	/* Discard any out-of-order or duplicate ACKs (inside lock). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 					   first_soft_ack, call->ackr_first_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 					   prev_pkt, call->ackr_prev_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	call->acks_latest_ts = skb->tstamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	call->ackr_first_seq = first_soft_ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	call->ackr_prev_seq = prev_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	/* Parse rwind and mtu sizes if provided. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	if (buf.info.rxMTU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		rxrpc_input_ackinfo(call, skb, &buf.info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	if (first_soft_ack == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		rxrpc_proto_abort("AK0", call, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	/* Ignore ACKs unless we are or have just been transmitting. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	switch (READ_ONCE(call->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	case RXRPC_CALL_CLIENT_SEND_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	case RXRPC_CALL_SERVER_SEND_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	case RXRPC_CALL_SERVER_AWAIT_ACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	if (before(hard_ack, call->tx_hard_ack) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	    after(hard_ack, call->tx_top)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		rxrpc_proto_abort("AKW", call, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	if (nr_acks > call->tx_top - hard_ack) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		rxrpc_proto_abort("AKN", call, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	if (after(hard_ack, call->tx_hard_ack)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 			rxrpc_end_tx_phase(call, false, "ETA");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	if (nr_acks > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 			rxrpc_proto_abort("XSA", call, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 				      &summary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	    RXRPC_TX_ANNO_LAST &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	    summary.nr_acks == call->tx_top - hard_ack &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	    rxrpc_is_client_call(call))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		rxrpc_propose_ACK(call, RXRPC_ACK_PING, ack_serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 				  false, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 				  rxrpc_propose_ack_ping_for_lost_reply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	rxrpc_congestion_management(call, skb, &summary, acked_serial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	spin_unlock(&call->input_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994)  * Process an ACKALL packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	struct rxrpc_ack_summary summary = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	_proto("Rx ACKALL %%%u", sp->hdr.serial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	spin_lock(&call->input_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		rxrpc_end_tx_phase(call, false, "ETL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	spin_unlock(&call->input_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)  * Process an ABORT packet directed at a call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	__be32 wtmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	u32 abort_code = RX_CALL_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	_enter("");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	if (skb->len >= 4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	    skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 			  &wtmp, sizeof(wtmp)) >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		abort_code = ntohl(wtmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	trace_rxrpc_rx_abort(call, sp->hdr.serial, abort_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	_proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 				  abort_code, -ECONNABORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)  * Process an incoming call packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) static void rxrpc_input_call_packet(struct rxrpc_call *call,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 				    struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	unsigned long timo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	_enter("%p,%p", call, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	timo = READ_ONCE(call->next_rx_timo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	if (timo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		unsigned long now = jiffies, expect_rx_by;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		expect_rx_by = now + timo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		WRITE_ONCE(call->expect_rx_by, expect_rx_by);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		rxrpc_reduce_call_timer(call, expect_rx_by, now,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 					rxrpc_timer_set_for_normal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	switch (sp->hdr.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	case RXRPC_PACKET_TYPE_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		rxrpc_input_data(call, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		goto no_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	case RXRPC_PACKET_TYPE_ACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		rxrpc_input_ack(call, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	case RXRPC_PACKET_TYPE_BUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		_proto("Rx BUSY %%%u", sp->hdr.serial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		/* Just ignore BUSY packets from the server; the retry and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		 * lifespan timers will take care of business.  BUSY packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		 * from the client don't make sense.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	case RXRPC_PACKET_TYPE_ABORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		rxrpc_input_abort(call, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	case RXRPC_PACKET_TYPE_ACKALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		rxrpc_input_ackall(call, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	rxrpc_free_skb(skb, rxrpc_skb_freed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) no_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	_leave("");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)  * Handle a new service call on a channel implicitly completing the preceding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)  * call on that channel.  This does not apply to client conns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)  * TODO: If callNumber > call_id + 1, renegotiate security.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) static void rxrpc_input_implicit_end_call(struct rxrpc_sock *rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 					  struct rxrpc_connection *conn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 					  struct rxrpc_call *call)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	switch (READ_ONCE(call->state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	case RXRPC_CALL_SERVER_AWAIT_ACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		rxrpc_call_completed(call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	case RXRPC_CALL_COMPLETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, -ESHUTDOWN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 			set_bit(RXRPC_CALL_EV_ABORT, &call->events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 			rxrpc_queue_call(call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		trace_rxrpc_improper_term(call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	spin_lock(&rx->incoming_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	__rxrpc_disconnect_call(conn, call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	spin_unlock(&rx->incoming_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)  * post connection-level events to the connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)  * - this includes challenges, responses, some aborts and call terminal packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)  *   retransmission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 				      struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	_enter("%p,%p", conn, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	skb_queue_tail(&conn->rx_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	rxrpc_queue_conn(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)  * post endpoint-level events to the local endpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)  * - this includes debug and version messages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 				       struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	_enter("%p,%p", local, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	if (rxrpc_get_local_maybe(local)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		skb_queue_tail(&local->event_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		rxrpc_queue_local(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		rxrpc_free_skb(skb, rxrpc_skb_freed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)  * put a packet up for transport-level abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	CHECK_SLAB_OKAY(&local->usage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	if (rxrpc_get_local_maybe(local)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		skb_queue_tail(&local->reject_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		rxrpc_queue_local(local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		rxrpc_free_skb(skb, rxrpc_skb_freed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)  * Extract the wire header from a packet and translate the byte order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) static noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	struct rxrpc_wire_header whdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	/* dig out the RxRPC connection details */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 				      tracepoint_string("bad_hdr"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		return -EBADMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	memset(sp, 0, sizeof(*sp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	sp->hdr.epoch		= ntohl(whdr.epoch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	sp->hdr.cid		= ntohl(whdr.cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	sp->hdr.callNumber	= ntohl(whdr.callNumber);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	sp->hdr.seq		= ntohl(whdr.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	sp->hdr.serial		= ntohl(whdr.serial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	sp->hdr.flags		= whdr.flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	sp->hdr.type		= whdr.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	sp->hdr.userStatus	= whdr.userStatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	sp->hdr.securityIndex	= whdr.securityIndex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	sp->hdr._rsvd		= ntohs(whdr._rsvd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	sp->hdr.serviceId	= ntohs(whdr.serviceId);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)  * handle data received on the local endpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)  * - may be called in interrupt context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)  * [!] Note that as this is called from the encap_rcv hook, the socket is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)  * held locked by the caller and nothing prevents sk_user_data on the UDP from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)  * being cleared in the middle of processing this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)  * Called with the RCU read lock held from the IP layer via UDP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	struct rxrpc_connection *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	struct rxrpc_channel *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	struct rxrpc_call *call = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	struct rxrpc_skb_priv *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	struct rxrpc_peer *peer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	struct rxrpc_sock *rx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	unsigned int channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	_enter("%p", udp_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	if (unlikely(!local)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	if (skb->tstamp == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		skb->tstamp = ktime_get_real();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	rxrpc_new_skb(skb, rxrpc_skb_received);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	skb_pull(skb, sizeof(struct udphdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	/* The UDP protocol already released all skb resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	 * we are free to add our own data there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	sp = rxrpc_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	/* dig out the RxRPC connection details */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	if (rxrpc_extract_header(sp, skb) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		goto bad_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		static int lose;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		if ((lose++ & 7) == 7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 			trace_rxrpc_rx_lose(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 			rxrpc_free_skb(skb, rxrpc_skb_lost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	if (skb->tstamp == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		skb->tstamp = ktime_get_real();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	trace_rxrpc_rx_packet(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	switch (sp->hdr.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	case RXRPC_PACKET_TYPE_VERSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		if (rxrpc_to_client(sp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 			goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		rxrpc_post_packet_to_local(local, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	case RXRPC_PACKET_TYPE_BUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		if (rxrpc_to_server(sp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 			goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	case RXRPC_PACKET_TYPE_ACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	case RXRPC_PACKET_TYPE_ACKALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		if (sp->hdr.callNumber == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 			goto bad_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	case RXRPC_PACKET_TYPE_ABORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	case RXRPC_PACKET_TYPE_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		if (sp->hdr.callNumber == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		    sp->hdr.seq == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 			goto bad_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		if (!rxrpc_validate_data(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 			goto bad_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		/* Unshare the packet so that it can be modified for in-place
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		 * decryption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		if (sp->hdr.securityIndex != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 			struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 			if (!nskb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 				rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 			if (nskb != skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 				rxrpc_eaten_skb(skb, rxrpc_skb_received);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 				skb = nskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 				rxrpc_new_skb(skb, rxrpc_skb_unshared);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 				sp = rxrpc_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	case RXRPC_PACKET_TYPE_CHALLENGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		if (rxrpc_to_server(sp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 			goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	case RXRPC_PACKET_TYPE_RESPONSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		if (rxrpc_to_client(sp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 			goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		/* Packet types 9-11 should just be ignored. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	case RXRPC_PACKET_TYPE_PARAMS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	case RXRPC_PACKET_TYPE_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	case RXRPC_PACKET_TYPE_11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		_proto("Rx Bad Packet Type %u", sp->hdr.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		goto bad_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	if (sp->hdr.serviceId == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		goto bad_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	if (rxrpc_to_server(sp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		/* Weed out packets to services we're not offering.  Packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		 * that would begin a call are explicitly rejected and the rest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		 * are just discarded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		rx = rcu_dereference(local->service);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 			    sp->hdr.serviceId != rx->second_service)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 			if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 			    sp->hdr.seq == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 				goto unsupported_service;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 			goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	conn = rxrpc_find_connection_rcu(local, skb, &peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	if (conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		if (sp->hdr.securityIndex != conn->security_ix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 			goto wrong_security;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		if (sp->hdr.serviceId != conn->service_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 			int old_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 			if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 				goto reupgrade;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 			old_id = cmpxchg(&conn->service_id, conn->params.service_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 					 sp->hdr.serviceId);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 			if (old_id != conn->params.service_id &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 			    old_id != sp->hdr.serviceId)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 				goto reupgrade;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		if (sp->hdr.callNumber == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 			/* Connection-level packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 			_debug("CONN %p {%d}", conn, conn->debug_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 			rxrpc_post_packet_to_conn(conn, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		if ((int)sp->hdr.serial - (int)conn->hi_serial > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 			conn->hi_serial = sp->hdr.serial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		/* Call-bound packets are routed by connection channel. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		channel = sp->hdr.cid & RXRPC_CHANNELMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		chan = &conn->channels[channel];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		/* Ignore really old calls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		if (sp->hdr.callNumber < chan->last_call)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 			goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		if (sp->hdr.callNumber == chan->last_call) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 			if (chan->call ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			    sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 				goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			/* For the previous service call, if completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 			 * successfully, we discard all further packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 			if (rxrpc_conn_is_service(conn) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 			    chan->last_type == RXRPC_PACKET_TYPE_ACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 				goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 			/* But otherwise we need to retransmit the final packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 			 * from data cached in the connection record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 			if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 				trace_rxrpc_rx_data(chan->call_debug_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 						    sp->hdr.seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 						    sp->hdr.serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 						    sp->hdr.flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 			rxrpc_post_packet_to_conn(conn, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		call = rcu_dereference(chan->call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		if (sp->hdr.callNumber > chan->call_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 			if (rxrpc_to_client(sp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 				goto reject_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 			if (call)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 				rxrpc_input_implicit_end_call(rx, conn, call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 			call = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		if (call) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 			if (sp->hdr.serviceId != call->service_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 				call->service_id = sp->hdr.serviceId;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 			if ((int)sp->hdr.serial - (int)call->rx_serial > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 				call->rx_serial = sp->hdr.serial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 			if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 				set_bit(RXRPC_CALL_RX_HEARD, &call->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	if (!call || atomic_read(&call->usage) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		if (rxrpc_to_client(sp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 			goto bad_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		if (sp->hdr.seq != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 			goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		call = rxrpc_new_incoming_call(local, rx, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		if (!call)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 			goto reject_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	/* Process a call packet; this either discards or passes on the ref
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	 * elsewhere.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	rxrpc_input_call_packet(call, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) discard:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	rxrpc_free_skb(skb, rxrpc_skb_freed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	trace_rxrpc_rx_done(0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) wrong_security:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 			  RXKADINCONSISTENCY, EBADMSG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	skb->priority = RXKADINCONSISTENCY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	goto post_abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) unsupported_service:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 			  RX_INVALID_OPERATION, EOPNOTSUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	skb->priority = RX_INVALID_OPERATION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	goto post_abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) reupgrade:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 			  RX_PROTOCOL_ERROR, EBADMSG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	goto protocol_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) bad_message:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 			  RX_PROTOCOL_ERROR, EBADMSG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) protocol_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	skb->priority = RX_PROTOCOL_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) post_abort:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) reject_packet:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	trace_rxrpc_rx_done(skb->mark, skb->priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	rxrpc_reject_packet(local, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	_leave(" [badmsg]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }