Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards
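
The listing below appears to be the SCTP state-machine side-effect processing code (net/sctp/sm_sideeffect.c in the kernel tree) as shipped in this kernel.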

// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions work with the state functions in sctp_sm_statefuns.c
 * to implement the state operations.  These functions implement the
 * steps which require modifying existing data structures.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Dajiang Zhang         <dajiang.zhang@nokia.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>

static int sctp_cmd_interpreter(enum sctp_event_type event_type,
				union sctp_subtype subtype,
				enum sctp_state state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				enum sctp_disposition status,
				struct sctp_cmd_seq *commands,
				gfp_t gfp);
static int sctp_side_effects(enum sctp_event_type event_type,
			     union sctp_subtype subtype,
			     enum sctp_state state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     enum sctp_disposition status,
			     struct sctp_cmd_seq *commands,
			     gfp_t gfp);

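/* Note: sctp_do_sm() (defined later in this file) funnels every state
 * machine event through sctp_side_effects(), which in turn runs
 * sctp_cmd_interpreter() over the command sequence built by the state
 * functions in sctp_sm_statefuns.c.
 */
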
/********************************************************************
 * Helper functions
 ********************************************************************/

/* A helper function for delayed processing of INET ECN CE bit. */
static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
				__u32 lowest_tsn)
{
	/* Save the TSN away for comparison when we receive CWR */

	asoc->last_ecne_tsn = lowest_tsn;
	asoc->need_ecne = 1;
}

/* Helper function for delayed processing of SCTP ECNE chunk.  */
/* RFC 2960 Appendix A
 *
 * RFC 2481 details a specific bit for a sender to send in
 * the header of its next outbound TCP segment to indicate to
 * its peer that it has reduced its congestion window.  This
 * is termed the CWR bit.  For SCTP the same indication is made
 * by including the CWR chunk.  This chunk contains one data
 * element, i.e. the TSN number that was sent in the ECNE chunk.
 * This element represents the lowest TSN number in the datagram
 * that was originally marked with the CE bit.
 */
static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
						__u32 lowest_tsn,
						struct sctp_chunk *chunk)
{
	struct sctp_chunk *repl;

	/* Our previously transmitted packet ran into some congestion
	 * so we should take action by reducing cwnd and ssthresh
	 * and then ACK our peer that we've done so by
	 * sending a CWR.
	 */

	/* First, try to determine if we want to actually lower
	 * our cwnd variables.  Only lower them if the ECNE looks more
	 * recent than the last response.
	 */
	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
		struct sctp_transport *transport;

		/* Find which transport's congestion variables
		 * need to be adjusted.
		 */
		transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);

		/* Update the congestion variables. */
		if (transport)
			sctp_transport_lower_cwnd(transport,
						  SCTP_LOWER_CWND_ECNE);
		asoc->last_cwr_tsn = lowest_tsn;
	}

	/* Always try to quiet the other end.  In case of lost CWR,
	 * resend last_cwr_tsn.
	 */
	repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);

	/* If we run out of memory, it will look like a lost CWR.  We'll
	 * get back in sync eventually.
	 */
	return repl;
}

/* Helper function to do delayed processing of ECN CWR chunk.  */
static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
				 __u32 lowest_tsn)
{
	/* Turn off ECNE getting auto-prepended to every outgoing
	 * packet
	 */
	asoc->need_ecne = 0;
}

/* Generate SACK if necessary.  We call this at the end of a packet.  */
static int sctp_gen_sack(struct sctp_association *asoc, int force,
			 struct sctp_cmd_seq *commands)
{
	struct sctp_transport *trans = asoc->peer.last_data_from;
	__u32 ctsn, max_tsn_seen;
	struct sctp_chunk *sack;
	int error = 0;

	if (force ||
	    (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
	    (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
		asoc->peer.sack_needed = 1;

	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);

	/* From 12.2 Parameters necessary per association (i.e. the TCB):
	 *
	 * Ack State : This flag indicates if the next received packet
	 *           : is to be responded to with a SACK. ...
	 *           : When DATA chunks are out of order, SACK's
	 *           : are not delayed (see Section 6).
	 *
	 * [This is actually not mentioned in Section 6, but we
	 * implement it here anyway. --piggy]
	 */
	if (max_tsn_seen != ctsn)
		asoc->peer.sack_needed = 1;

	/* From 6.2  Acknowledgement on Reception of DATA Chunks:
	 *
	 * Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
	 * an acknowledgement SHOULD be generated for at least every
	 * second packet (not every second DATA chunk) received, and
	 * SHOULD be generated within 200 ms of the arrival of any
	 * unacknowledged DATA chunk. ...
	 */
	if (!asoc->peer.sack_needed) {
		asoc->peer.sack_cnt++;

		/* Set the SACK delay timeout based on the
		 * SACK delay for the last transport
		 * data was received from, or the default
		 * for the association.
		 */
		if (trans) {
			/* We will need a SACK for the next packet.  */
			if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				trans->sackdelay;
		} else {
			/* We will need a SACK for the next packet.  */
			if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				asoc->sackdelay;
		}

		/* Restart the SACK timer. */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	} else {
		__u32 old_a_rwnd = asoc->a_rwnd;

		asoc->a_rwnd = asoc->rwnd;
		sack = sctp_make_sack(asoc);
		if (!sack) {
			asoc->a_rwnd = old_a_rwnd;
			goto nomem;
		}

		asoc->peer.sack_needed = 0;
		asoc->peer.sack_cnt = 0;

		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));

		/* Stop the SACK timer.  */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	}

	return error;
nomem:
	error = -ENOMEM;
	return error;
}

/* When the T3-RTX timer expires, it calls this function to create the
 * relevant state machine event.
 */
void sctp_generate_t3_rtx_event(struct timer_list *t)
{
	struct sctp_transport *transport =
		from_timer(transport, t, T3_rtx_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error;

	/* Check whether a task is in the sock.  */

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Run through the state machine.  */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
			   asoc->state,
			   asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* This is a sa interface for producing timeout events.  It works
 * for timeouts which use the association as their parameter.
 */
static void sctp_generate_timeout_event(struct sctp_association *asoc,
					enum sctp_event_timeout timeout_type)
{
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy: timer %d\n", __func__,
			 timeout_type);

		/* Try again later.  */
		if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this association really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	/* Run through the state machine.  */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(timeout_type),
			   asoc->state, asoc->ep, asoc,
			   (void *)timeout_type, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}

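/* Association-level timer callbacks: each one recovers the association
 * from the expired timer and feeds the matching timeout type to
 * sctp_generate_timeout_event() above.
 */
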
static void sctp_generate_t1_cookie_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_COOKIE]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
}

static void sctp_generate_t1_init_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_INIT]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
}

static void sctp_generate_t2_shutdown_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
}

static void sctp_generate_t4_rto_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T4_RTO]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
}

static void sctp_generate_t5_shutdown_guard_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t,
			   timers[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]);

	sctp_generate_timeout_event(asoc,
				    SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);

} /* sctp_generate_t5_shutdown_guard_event() */

static void sctp_generate_autoclose_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
}

/* Generate a heart beat event.  If the sock is busy, reschedule.   Make
 * sure that the transport is still valid.
 */
void sctp_generate_heartbeat_event(struct timer_list *t)
{
	struct sctp_transport *transport = from_timer(transport, t, hb_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	u32 elapsed, timeout;
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Check if we should still send the heartbeat or reschedule */
	elapsed = jiffies - transport->last_time_sent;
	timeout = sctp_transport_timeout(transport);
	if (elapsed < timeout) {
		elapsed = timeout - elapsed;
		if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Handle the timeout of the ICMP protocol unreachable timer.  Trigger
 * the correct state machine transition that will close the association.
 */
void sctp_generate_proto_unreach_event(struct timer_list *t)
{
	struct sctp_transport *transport =
		from_timer(transport, t, proto_unreach_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->proto_unreach_timer,
				jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Is this structure just waiting around for us to actually
	 * get destroyed?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	sctp_do_sm(net, SCTP_EVENT_T_OTHER,
		   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Handle the timeout of the RE-CONFIG timer. */
void sctp_generate_reconf_event(struct timer_list *t)
{
	struct sctp_transport *transport =
		from_timer(transport, t, reconf_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->reconf_timer, jiffies + (HZ / 20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Inject a SACK Timeout event into the state machine.  */
static void sctp_generate_sack_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_SACK]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
}

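/* Dispatch table mapping each timeout type to its timer callback.  The
 * NULL slots for T3-RTX, HEARTBEAT and RECONF are per-transport timers
 * that are armed directly with their own callbacks
 * (sctp_generate_t3_rtx_event, sctp_generate_heartbeat_event,
 * sctp_generate_reconf_event) rather than through this table.
 */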
sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
	[SCTP_EVENT_TIMEOUT_NONE] =		NULL,
	[SCTP_EVENT_TIMEOUT_T1_COOKIE] =	sctp_generate_t1_cookie_event,
	[SCTP_EVENT_TIMEOUT_T1_INIT] =		sctp_generate_t1_init_event,
	[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] =	sctp_generate_t2_shutdown_event,
	[SCTP_EVENT_TIMEOUT_T3_RTX] =		NULL,
	[SCTP_EVENT_TIMEOUT_T4_RTO] =		sctp_generate_t4_rto_event,
	[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD] =
					sctp_generate_t5_shutdown_guard_event,
	[SCTP_EVENT_TIMEOUT_HEARTBEAT] =	NULL,
	[SCTP_EVENT_TIMEOUT_RECONF] =		NULL,
	[SCTP_EVENT_TIMEOUT_SACK] =		sctp_generate_sack_event,
	[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =	sctp_generate_autoclose_event,
};


/* RFC 2960 8.2 Path Failure Detection
 *
 * When its peer endpoint is multi-homed, an endpoint should keep an
 * error counter for each of the destination transport addresses of the
 * peer endpoint.
 *
 * Each time the T3-rtx timer expires on any address, or when a
 * HEARTBEAT sent to an idle address is not acknowledged within a RTO,
 * the error counter of that destination address will be incremented.
 * When the value in the error counter exceeds the protocol parameter
 * 'Path.Max.Retrans' of that destination address, the endpoint should
 * mark the destination transport address as inactive, and a
 * notification SHOULD be sent to the upper layer.
 *
 */
static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
					 struct sctp_association *asoc,
					 struct sctp_transport *transport,
					 int is_hb)
{
	/* The check for association's overall error counter exceeding the
	 * threshold is done in the state function.
	 */
	/* We are here due to a timer expiration.  If the timer was
	 * not a HEARTBEAT, then normal error tracking is done.
	 * If the timer was a heartbeat, we only increment error counts
	 * when we already have an outstanding HEARTBEAT that has not
	 * been acknowledged.
	 * Additionally, some transport states inhibit error increments.
	 */
	if (!is_hb) {
		asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	} else if (transport->hb_sent) {
		if (transport->state != SCTP_UNCONFIRMED)
			asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	}

	/* If the transport error count is greater than the pf_retrans
	 * threshold, and less than pathmaxrxt, and if the current state
	 * is SCTP_ACTIVE, then mark this transport as Partially Failed,
	 * see SCTP Quick Failover Draft, section 5.1
	 */
	if (asoc->base.net->sctp.pf_enable &&
	    transport->state == SCTP_ACTIVE &&
	    transport->error_count < transport->pathmaxrxt &&
	    transport->error_count > transport->pf_retrans) {

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_PF,
					     0);

		/* Update the hb timer to resend a heartbeat every rto */
		sctp_transport_reset_hb_timer(transport);
	}

	if (transport->state != SCTP_INACTIVE &&
	    (transport->error_count > transport->pathmaxrxt)) {
		pr_debug("%s: association:%p transport addr:%pISpc failed\n",
			 __func__, asoc, &transport->ipaddr.sa);

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_DOWN,
					     SCTP_FAILED_THRESHOLD);
	}

	if (transport->error_count > transport->ps_retrans &&
	    asoc->peer.primary_path == transport &&
	    asoc->peer.active_path != transport)
		sctp_assoc_set_primary(asoc, asoc->peer.active_path);

	/* E2) For the destination address for which the timer
	 * expires, set RTO <- RTO * 2 ("back off the timer").  The
	 * maximum value discussed in rule C7 above (RTO.max) may be
	 * used to provide an upper bound to this doubling operation.
	 *
	 * Special Case:  the first HB doesn't trigger exponential backoff.
	 * The first unacknowledged HB triggers it.  We do this with a flag
	 * that indicates that we have an outstanding HB.
	 */
	if (!is_hb || transport->hb_sent) {
		transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
		sctp_max_rto(asoc, transport);
	}
}

/* Worker routine to handle INIT command failure.  */
static void sctp_cmd_init_failed(struct sctp_cmd_seq *commands,
				 struct sctp_association *asoc,
				 unsigned int error)
{
	struct sctp_ulpevent *event;

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);

	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Worker routine to handle SCTP_CMD_ASSOC_FAILED.  */
static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc,
				  enum sctp_event_type event_type,
				  union sctp_subtype subtype,
				  struct sctp_chunk *chunk,
				  unsigned int error)
{
	struct sctp_ulpevent *event;
	struct sctp_chunk *abort;

	/* Cancel any partial delivery in progress. */
	asoc->stream.si->abort_pd(&asoc->ulpq, GFP_ATOMIC);

	if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0, chunk,
						GFP_ATOMIC);
	else
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);
	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	if (asoc->overall_error_count >= asoc->max_retrans) {
		abort = sctp_make_violation_max_retrans(asoc, chunk);
		if (abort)
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(abort));
	}

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
 * inside the cookie).  In reality, this is only used for INIT-ACK processing
 * since all other cases use "temporary" associations and can do all
 * their work in statefuns directly.
 */
static int sctp_cmd_process_init(struct sctp_cmd_seq *commands,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk,
				 struct sctp_init_chunk *peer_init,
				 gfp_t gfp)
{
	int error;

	/* We only process the init as a sideeffect in a single
	 * case.   This is when we process the INIT-ACK.   If we
	 * fail during INIT processing (due to malloc problems),
	 * just return the error and stop processing the stack.
	 */
	if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
		error = -ENOMEM;
	else
		error = 0;

	return error;
}

/* Helper function to break out starting up of heartbeat timers.  */
static void sctp_cmd_hb_timers_start(struct sctp_cmd_seq *cmds,
				     struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Start a heartbeat timer for each transport on the association.
	 * hold a reference on the transport to make sure none of
	 * the needed data structures go away.
	 */
	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
		sctp_transport_reset_hb_timer(t);
}

static void sctp_cmd_hb_timers_stop(struct sctp_cmd_seq *cmds,
				    struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Stop all heartbeat timers. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			transports) {
		if (del_timer(&t->hb_timer))
			sctp_transport_put(t);
	}
}

/* Helper function to stop any pending T3-RTX timers */
static void sctp_cmd_t3_rtx_timers_stop(struct sctp_cmd_seq *cmds,
					struct sctp_association *asoc)
{
	struct sctp_transport *t;

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			transports) {
		if (del_timer(&t->T3_rtx_timer))
			sctp_transport_put(t);
	}
}


^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) /* Helper function to handle the reception of an HEARTBEAT ACK.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) static void sctp_cmd_transport_on(struct sctp_cmd_seq *cmds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 				  struct sctp_association *asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 				  struct sctp_transport *t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 				  struct sctp_chunk *chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	struct sctp_sender_hb_info *hbinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	int was_unconfirmed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	 * HEARTBEAT should clear the error counter of the destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	 * transport address to which the HEARTBEAT was sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	t->error_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	 * Although RFC4960 specifies that the overall error count must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	 * be cleared when a HEARTBEAT ACK is received, we make an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	 * exception while in SHUTDOWN PENDING. If the peer keeps its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	 * window shut forever, we may never be able to transmit our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	 * outstanding data and rely on the retransmission limit being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	 * reached to shut down the association.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 		t->asoc->overall_error_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	/* Clear the hb_sent flag to signal that we had a good
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	 * acknowledgement.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	t->hb_sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	/* Mark the destination transport address as active if it is not so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	 * marked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		was_unconfirmed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 					     SCTP_HEARTBEAT_SUCCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	if (t->state == SCTP_PF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 					     SCTP_HEARTBEAT_SUCCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	/* HB-ACK was received for the proper HB.  Consider this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	 * forward progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	if (t->dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		sctp_transport_dst_confirm(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	/* The receiver of the HEARTBEAT ACK should also perform an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	 * RTT measurement for that destination transport address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	 * using the time value carried in the HEARTBEAT ACK chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	 * If the transport's rto_pending variable has been cleared,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	 * it was most likely due to a retransmit.  However, we want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	 * to re-enable it to properly update the rto.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	if (t->rto_pending == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		t->rto_pending = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
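	/* The HEARTBEAT ACK echoes back our sender-private heartbeat info,
	 * including the jiffies timestamp at which the HEARTBEAT was sent,
	 * so the RTT sample below is simply "now minus sent_at".
	 */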
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	/* Update the heartbeat timer.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	sctp_transport_reset_hb_timer(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	if (was_unconfirmed && asoc->peer.transport_count == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		sctp_transport_immediate_rtx(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) /* Helper function to process the SACK command.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) static int sctp_cmd_process_sack(struct sctp_cmd_seq *cmds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 				 struct sctp_association *asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 				 struct sctp_chunk *chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	if (sctp_outq_sack(&asoc->outqueue, chunk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		/* There are no more TSNs awaiting SACK.  */
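		/* Feed a NO_PENDING_TSN event back through the state machine
		 * so that, for example, a queued SHUTDOWN can now go out.
		 */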
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		err = sctp_do_sm(asoc->base.net, SCTP_EVENT_T_OTHER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 				 asoc->state, asoc->ep, asoc, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 				 GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) /* Helper function to set the timeout value for the T2-SHUTDOWN timer and to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810)  * set the transport for a shutdown chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) static void sctp_cmd_setup_t2(struct sctp_cmd_seq *cmds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 			      struct sctp_association *asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 			      struct sctp_chunk *chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	struct sctp_transport *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	if (chunk->transport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		t = chunk->transport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		t = sctp_assoc_choose_alter_transport(asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 					      asoc->shutdown_last_sent_to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		chunk->transport = t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	asoc->shutdown_last_sent_to = t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) static void sctp_cmd_assoc_update(struct sctp_cmd_seq *cmds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 				  struct sctp_association *asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 				  struct sctp_association *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	struct net *net = asoc->base.net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	struct sctp_chunk *abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	if (!sctp_assoc_update(asoc, new))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
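	/* The update failed (most likely an allocation failure while copying
	 * the new peer state), so tear the association down with an
	 * "Out of Resource" abort.
	 */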
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (abort) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		sctp_add_cmd_sf(cmds, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	sctp_add_cmd_sf(cmds, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	sctp_add_cmd_sf(cmds, SCTP_CMD_ASSOC_FAILED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 			SCTP_PERR(SCTP_ERROR_RSRC_LOW));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) /* Helper function to change the state of an association. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 			       struct sctp_association *asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 			       enum sctp_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	struct sock *sk = asoc->base.sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	asoc->state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	if (sctp_style(sk, TCP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		/* Change the sk->sk_state of a TCP-style socket that has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		 * successfully completed a connect() call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 			inet_sk_set_state(sk, SCTP_SS_ESTABLISHED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		    sctp_sstate(sk, ESTABLISHED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 			inet_sk_set_state(sk, SCTP_SS_CLOSING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 			sk->sk_shutdown |= RCV_SHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	if (sctp_state(asoc, COOKIE_WAIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		/* Reset init timeouts since they may have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		 * increased due to timer expirations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 						asoc->rto_initial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 						asoc->rto_initial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
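	/* The peer's state cookie is only needed to (re)build COOKIE ECHO
	 * chunks, so it can be dropped once the association is established.
	 */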
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	if (sctp_state(asoc, ESTABLISHED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		kfree(asoc->peer.cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		asoc->peer.cookie = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	if (sctp_state(asoc, ESTABLISHED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	    sctp_state(asoc, CLOSED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	    sctp_state(asoc, SHUTDOWN_RECEIVED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		/* Wake up any processes waiting in the asoc's wait queue in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		if (waitqueue_active(&asoc->wait))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 			wake_up_interruptible(&asoc->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		/* Wake up any processes waiting in the sk's sleep queue of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		 * a TCP-style or UDP-style peeled-off socket in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		 * sctp_wait_for_accept() or sctp_wait_for_packet().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		 * For a UDP-style socket, the waiters are woken up by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		 * notifications.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		if (!sctp_style(sk, UDP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 			sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	if (sctp_state(asoc, SHUTDOWN_PENDING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	    !sctp_outq_is_empty(&asoc->outqueue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		sctp_outq_uncork(&asoc->outqueue, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) /* Helper function to delete an association. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) static void sctp_cmd_delete_tcb(struct sctp_cmd_seq *cmds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 				struct sctp_association *asoc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	struct sock *sk = asoc->base.sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	/* If it is a non-temporary association belonging to a TCP-style
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	 * listening socket that is not closed, do not free it so that accept()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	 * can pick it up later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	    (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	sctp_association_free(asoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934)  * ADDIP Section 4.1 ASCONF Chunk Procedures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  * A4) Start a T-4 RTO timer, using the RTO value of the selected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  * destination address (we use the active path instead of the primary path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  * just because the primary path may be inactive).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) static void sctp_cmd_setup_t4(struct sctp_cmd_seq *cmds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 			      struct sctp_association *asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 			      struct sctp_chunk *chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	struct sctp_transport *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	chunk->transport = t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) /* Process an incoming Operation Error Chunk. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 				   struct sctp_association *asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 				   struct sctp_chunk *chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	struct sctp_errhdr *err_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	struct sctp_ulpevent *ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	while (chunk->chunk_end > chunk->skb->data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		err_hdr = (struct sctp_errhdr *)(chunk->skb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 						     GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		if (!ev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		switch (err_hdr->cause) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		case SCTP_ERROR_UNKNOWN_CHUNK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 			struct sctp_chunkhdr *unk_chunk_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 			unk_chunk_hdr = (struct sctp_chunkhdr *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 							err_hdr->variable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 			switch (unk_chunk_hdr->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 			/* ADDIP 4.1 A9) If the peer responds to an ASCONF with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			 * an ERROR chunk reporting that it did not recognize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			 * the ASCONF chunk type, the sender of the ASCONF MUST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			 * NOT send any further ASCONF chunks and MUST stop its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			 * T-4 timer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			case SCTP_CID_ASCONF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 				if (asoc->peer.asconf_capable == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 				asoc->peer.asconf_capable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 				sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 					SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) /* Helper function to remove the association's non-primary peer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)  * transports.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	struct sctp_transport *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	struct list_head *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	struct list_head *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
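	/* Walk the list with the _safe iterator: sctp_assoc_rm_peer() unlinks
	 * and frees the transport we are currently visiting.
	 */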
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		t = list_entry(pos, struct sctp_transport, transports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		if (!sctp_cmp_addr_exact(&t->ipaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 					 &asoc->peer.primary_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 			sctp_assoc_rm_peer(asoc, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) /* Helper function to set sk_err on a 1-1 style socket. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	struct sock *sk = asoc->base.sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	if (!sctp_style(sk, UDP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		sk->sk_err = error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) /* Helper function to generate an association change event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) static void sctp_cmd_assoc_change(struct sctp_cmd_seq *commands,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 				  struct sctp_association *asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 				  u8 state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	struct sctp_ulpevent *ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 					    asoc->c.sinit_num_ostreams,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 					    asoc->c.sinit_max_instreams,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 					    NULL, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	if (ev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) static void sctp_cmd_peer_no_auth(struct sctp_cmd_seq *commands,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 				  struct sctp_association *asoc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	struct sctp_ulpevent *ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	ev = sctp_ulpevent_make_authkey(asoc, 0, SCTP_AUTH_NO_AUTH, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	if (ev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /* Helper function to generate an adaptation indication event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) static void sctp_cmd_adaptation_ind(struct sctp_cmd_seq *commands,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 				    struct sctp_association *asoc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	struct sctp_ulpevent *ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	if (ev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 				     enum sctp_event_timeout timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 				     char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	struct sctp_transport *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	t = asoc->init_last_sent_to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	asoc->init_err_counter++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
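	/* Exponential backoff: each additional INIT/COOKIE-ECHO cycle doubles
	 * the T1 timeout, capped at asoc->max_init_timeo.
	 */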
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	if (t->init_sent_count > (asoc->init_cycle + 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		asoc->timeouts[timer] *= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		if (asoc->timeouts[timer] > asoc->max_init_timeo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 			asoc->timeouts[timer] = asoc->max_init_timeo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		asoc->init_cycle++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 			 " cycle:%d timeout:%ld\n", __func__, name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 			 asoc->init_err_counter, asoc->init_cycle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 			 asoc->timeouts[timer]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) /* Send the whole message, chunk by chunk, to the outqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)  * This way the whole message is queued up and bundling is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)  * encouraged for small fragments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) static void sctp_cmd_send_msg(struct sctp_association *asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 			      struct sctp_datamsg *msg, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	struct sctp_chunk *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	list_for_each_entry(chunk, &msg->chunks, frag_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		sctp_outq_tail(&asoc->outqueue, chunk, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
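	/* Let the stream scheduler account for the queued message so it can
	 * decide which stream to dequeue from next.
	 */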
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	asoc->outqueue.sched->enqueue(&asoc->outqueue, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) /* These three macros allow us to pull the debugging code out of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)  * main flow of sctp_do_sm() to keep attention focused on the real
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)  * functionality there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) #define debug_pre_sfn() \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		 ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype),   \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		 asoc, sctp_state_tbl[state], state_fn->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) #define debug_post_sfn() \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		 sctp_status_tbl[status])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) #define debug_post_sfx() \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		 asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		 sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)  * This is the master state machine processing function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)  * If you want to understand all of lksctp, this is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)  * good place to start.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	       union sctp_subtype subtype, enum sctp_state state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	       struct sctp_endpoint *ep, struct sctp_association *asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	       void *event_arg, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	typedef const char *(printfn_t)(union sctp_subtype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	static printfn_t *table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	printfn_t *debug_fn  __attribute__ ((unused)) = table[event_type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	const struct sctp_sm_table_entry *state_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	struct sctp_cmd_seq commands;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	enum sctp_disposition status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	/* Look up the state function, run it, and then process the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	 * side effects.  These three steps are the heart of lksctp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	sctp_init_cmd_seq(&commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	debug_pre_sfn();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	debug_post_sfn();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	error = sctp_side_effects(event_type, subtype, state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 				  ep, &asoc, event_arg, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 				  &commands, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	debug_post_sfx();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
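
/* For illustration, callers feed events into the state machine along these
 * lines (a sketch based on the timer and input paths elsewhere in SCTP, not
 * an exhaustive list):
 *
 *	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
 *			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
 *			   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
 *
 * i.e. an event type plus subtype, the current association state, and an
 * event-specific argument (here the transport whose heartbeat timer fired).
 */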
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /*****************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)  * This is the master state function side effect processing function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)  *****************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static int sctp_side_effects(enum sctp_event_type event_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			     union sctp_subtype subtype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			     enum sctp_state state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 			     struct sctp_endpoint *ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 			     struct sctp_association **asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 			     void *event_arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 			     enum sctp_disposition status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 			     struct sctp_cmd_seq *commands,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 			     gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	/* FIXME - Most of the dispositions left today would be categorized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	 * as "exceptional" dispositions.  For those dispositions, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	 * may not be proper to run through any of the commands at all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	 * For example, the command interpreter might be run only with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	 * disposition SCTP_DISPOSITION_CONSUME.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 					       ep, *asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 					       event_arg, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 					       commands, gfp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	case SCTP_DISPOSITION_DISCARD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		pr_debug("%s: ignored sctp protocol event - state:%d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 			 "event_type:%d, event_id:%d\n", __func__, state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 			 event_type, subtype.chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	case SCTP_DISPOSITION_NOMEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		/* We ran out of memory, so we need to discard this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		 * packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		/* BUG--we should now recover some memory, probably by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		 * reneging...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	case SCTP_DISPOSITION_DELETE_TCB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	case SCTP_DISPOSITION_ABORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		/* This should now be a command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		*asoc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	case SCTP_DISPOSITION_CONSUME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		 * We should no longer have much work to do here as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		 * real work has been done as explicit commands above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	case SCTP_DISPOSITION_VIOLATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		net_err_ratelimited("protocol violation state %d chunkid %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 				    state, subtype.chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	case SCTP_DISPOSITION_NOT_IMPL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 			state, event_type, subtype.chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	case SCTP_DISPOSITION_BUG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		pr_err("bug in state %d, event_type %d, event_id %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		       state, event_type, subtype.chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		       status, state, event_type, subtype.chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) /********************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)  * 2nd Level Abstractions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)  ********************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) /* This is the side-effect interpreter.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) static int sctp_cmd_interpreter(enum sctp_event_type event_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 				union sctp_subtype subtype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 				enum sctp_state state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 				struct sctp_endpoint *ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 				struct sctp_association *asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 				void *event_arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 				enum sctp_disposition status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 				struct sctp_cmd_seq *commands,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 				gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	struct sctp_sock *sp = sctp_sk(ep->base.sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	struct sctp_chunk *chunk = NULL, *new_obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	struct sctp_packet *packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	struct sctp_sackhdr sackh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	struct timer_list *timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	struct sctp_transport *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	struct sctp_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	int local_cork = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	int force;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	if (SCTP_EVENT_T_TIMEOUT != event_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		chunk = event_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	/* Note:  This whole file is a huge candidate for rework.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	 * For example, each command could have its own handler, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	 * the loop would look like:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	 *     while (cmds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	 *         cmd->handle(x, y, z)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	 * --jgrimm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	 */
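	/* A hypothetical sketch of that rework (not part of this kernel): the
	 * verbs would index a handler table and the big switch below would
	 * collapse to something like
	 *
	 *	typedef int (*sctp_cmd_handler_t)(struct sctp_association *asoc,
	 *					  struct sctp_cmd *cmd, gfp_t gfp);
	 *	static sctp_cmd_handler_t sctp_cmd_handlers[SCTP_CMD_LAST + 1];
	 *
	 *	while ((cmd = sctp_next_cmd(commands)))
	 *		error = sctp_cmd_handlers[cmd->verb](asoc, cmd, gfp);
	 */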
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	while (NULL != (cmd = sctp_next_cmd(commands))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		switch (cmd->verb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		case SCTP_CMD_NOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 			/* Do nothing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		case SCTP_CMD_NEW_ASOC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 			/* Register a new association.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 			if (local_cork) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 				sctp_outq_uncork(&asoc->outqueue, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 				local_cork = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 			/* Register with the endpoint.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 			asoc = cmd->obj.asoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 			BUG_ON(asoc->peer.primary_path == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 			sctp_endpoint_add_asoc(ep, asoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		case SCTP_CMD_UPDATE_ASSOC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		       sctp_cmd_assoc_update(commands, asoc, cmd->obj.asoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		       break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		case SCTP_CMD_PURGE_OUTQUEUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		       sctp_outq_teardown(&asoc->outqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		       break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		case SCTP_CMD_DELETE_TCB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 			if (local_cork) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 				sctp_outq_uncork(&asoc->outqueue, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 				local_cork = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 			/* Delete the current association.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 			sctp_cmd_delete_tcb(commands, asoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 			asoc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		case SCTP_CMD_NEW_STATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 			/* Enter a new state.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		case SCTP_CMD_REPORT_TSN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 			/* Record the arrival of a TSN.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 			error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 						 cmd->obj.u32, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		case SCTP_CMD_REPORT_FWDTSN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 			asoc->stream.si->report_ftsn(&asoc->ulpq, cmd->obj.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		case SCTP_CMD_PROCESS_FWDTSN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 			asoc->stream.si->handle_ftsn(&asoc->ulpq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 						     cmd->obj.chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		case SCTP_CMD_GEN_SACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 			/* Generate a Selective ACK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 			 * The argument tells us whether to just count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 			 * the packet and MAYBE generate a SACK, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 			 * force a SACK out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 			force = cmd->obj.i32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 			error = sctp_gen_sack(asoc, force, commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		case SCTP_CMD_PROCESS_SACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 			/* Process an inbound SACK.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 			error = sctp_cmd_process_sack(commands, asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 						      cmd->obj.chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		case SCTP_CMD_GEN_INIT_ACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 			/* Generate an INIT ACK chunk.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 						     0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 			if (!new_obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 				error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 					SCTP_CHUNK(new_obj));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		case SCTP_CMD_PEER_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 			/* Process a unified INIT from the peer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 			 * Note: Only used during INIT-ACK processing.  If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			 * there is an error, just return to the outer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 			 * layer which will bail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			error = sctp_cmd_process_init(commands, asoc, chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 						      cmd->obj.init, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		case SCTP_CMD_GEN_COOKIE_ECHO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 			/* Generate a COOKIE ECHO chunk.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 			new_obj = sctp_make_cookie_echo(asoc, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 			if (!new_obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 				if (cmd->obj.chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 					sctp_chunk_free(cmd->obj.chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 				error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 					SCTP_CHUNK(new_obj));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 			/* If there is an ERROR chunk to be sent along with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 			 * the COOKIE_ECHO, send it, too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 			if (cmd->obj.chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 						SCTP_CHUNK(cmd->obj.chunk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 			if (new_obj->transport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 				new_obj->transport->init_sent_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 				asoc->init_last_sent_to = new_obj->transport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 			/* FIXME - Eventually come up with a cleaner way to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 			 * enable COOKIE-ECHO + DATA bundling during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 			 * multihoming stale cookie scenarios, the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 			 * command plays with asoc->peer.retran_path to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 			 * avoid the problem of sending the COOKIE-ECHO and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 			 * DATA in different paths, which could result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 			 * in the association being ABORTed if the DATA chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 			 * is processed first by the server.  Checking the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 			 * init error counter simply causes this command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 			 * to be executed only during failed attempts of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 			 * association establishment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 			if ((asoc->peer.retran_path !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 			     asoc->peer.primary_path) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 			    (asoc->init_err_counter > 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 				sctp_add_cmd_sf(commands,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 						SCTP_CMD_FORCE_PRIM_RETRAN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 						SCTP_NULL());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		case SCTP_CMD_GEN_SHUTDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 			 * Reset error counts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 			asoc->overall_error_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 			/* Generate a SHUTDOWN chunk.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 			new_obj = sctp_make_shutdown(asoc, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 			if (!new_obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 				error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 					SCTP_CHUNK(new_obj));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		case SCTP_CMD_CHUNK_ULP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 			/* Send a chunk to the sockets layer.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 			pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 				 __func__, cmd->obj.chunk, &asoc->ulpq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 			asoc->stream.si->ulpevent_data(&asoc->ulpq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 						       cmd->obj.chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 						       GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		case SCTP_CMD_EVENT_ULP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 			/* Send a notification to the sockets layer.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 			pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 				 __func__, cmd->obj.ulpevent, &asoc->ulpq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 			asoc->stream.si->enqueue_event(&asoc->ulpq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 						       cmd->obj.ulpevent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		case SCTP_CMD_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 			/* If the caller has not already corked, do cork. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 			if (!asoc->outqueue.cork) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 				sctp_outq_cork(&asoc->outqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 				local_cork = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 			/* Send a chunk to our peer.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 			sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		case SCTP_CMD_SEND_PKT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 			/* Send a full packet to our peer.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 			packet = cmd->obj.packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 			sctp_packet_transmit(packet, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 			sctp_ootb_pkt_free(packet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		case SCTP_CMD_T1_RETRAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 			/* Mark a transport for retransmission.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 					SCTP_RTXR_T1_RTX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		case SCTP_CMD_RETRAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 			/* Mark a transport for retransmission.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 					SCTP_RTXR_T3_RTX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		case SCTP_CMD_ECN_CE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 			/* Do delayed CE processing.   */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 			sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		case SCTP_CMD_ECN_ECNE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 			/* Do delayed ECNE processing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 			new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 							chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 			if (new_obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 						SCTP_CHUNK(new_obj));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		case SCTP_CMD_ECN_CWR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 			/* Do delayed CWR processing.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 			sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		case SCTP_CMD_SETUP_T2:
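			/* Choose the transport and timeout for the T2-SHUTDOWN timer. */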
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 			sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		case SCTP_CMD_TIMER_START_ONCE:
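			/* Start the timer only if it is not already running. */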
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 			timer = &asoc->timers[cmd->obj.to];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 			if (timer_pending(timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		case SCTP_CMD_TIMER_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 			timer = &asoc->timers[cmd->obj.to];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 			timeout = asoc->timeouts[cmd->obj.to];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 			BUG_ON(!timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 			 * SCTP has a hard time with timer starts.  Because we process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 			 * timer starts as side effects, it can be hard to tell if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 			 * have already started a timer or not, which leads to BUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 			 * halts when we call add_timer.  So here, instead of just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 			 * starting the timer, we modify an already-started timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 			 * with the shorter of the two expiration times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 			if (!timer_pending(timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 				sctp_association_hold(asoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 			timer_reduce(timer, jiffies + timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		case SCTP_CMD_TIMER_RESTART:
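			/* Re-arm the timer, holding the association if it was not pending. */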
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 			timer = &asoc->timers[cmd->obj.to];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 			timeout = asoc->timeouts[cmd->obj.to];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 			if (!mod_timer(timer, jiffies + timeout))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 				sctp_association_hold(asoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		case SCTP_CMD_TIMER_STOP:
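			/* Stop the timer and drop the reference it held on the asoc. */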
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 			timer = &asoc->timers[cmd->obj.to];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 			if (del_timer(timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 				sctp_association_put(asoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
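			/* Pick the (possibly new) transport for sending this INIT. */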
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 			chunk = cmd->obj.chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 			t = sctp_assoc_choose_alter_transport(asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 						asoc->init_last_sent_to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 			asoc->init_last_sent_to = t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 			chunk->transport = t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 			t->init_sent_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 			/* Set the new transport as primary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 			sctp_assoc_set_primary(asoc, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		case SCTP_CMD_INIT_RESTART:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 			/* Do the needed accounting and updates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 			 * associated with restarting an initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 			 * timer. Only multiply the timeout by two if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 			 * all transports have been tried at the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 			 * timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 			sctp_cmd_t1_timer_update(asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 						SCTP_EVENT_TIMEOUT_T1_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 						"INIT");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		case SCTP_CMD_COOKIEECHO_RESTART:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 			/* Do the needed accounting and updates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 			 * associated with restarting an initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 			 * timer. Only multiply the timeout by two if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 			 * all transports have been tried at the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 			 * timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 			sctp_cmd_t1_timer_update(asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 						SCTP_EVENT_TIMEOUT_T1_COOKIE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 						"COOKIE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 			/* If we've sent any data bundled with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 			 * COOKIE-ECHO, we need to resend it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 			list_for_each_entry(t, &asoc->peer.transport_addr_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 					transports) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 				sctp_retransmit_mark(&asoc->outqueue, t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 					    SCTP_RTXR_T1_RTX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 			sctp_add_cmd_sf(commands,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 					SCTP_CMD_TIMER_RESTART,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		case SCTP_CMD_INIT_FAILED:
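			/* Association setup failed: notify the ULP and tear down. */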
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 			sctp_cmd_init_failed(commands, asoc, cmd->obj.u16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		case SCTP_CMD_ASSOC_FAILED:
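			/* The association failed: notify the ULP and bring it down. */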
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 			sctp_cmd_assoc_failed(commands, asoc, event_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 					      subtype, chunk, cmd->obj.u16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		case SCTP_CMD_INIT_COUNTER_INC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 			asoc->init_err_counter++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		case SCTP_CMD_INIT_COUNTER_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 			asoc->init_err_counter = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 			asoc->init_cycle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 			list_for_each_entry(t, &asoc->peer.transport_addr_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 					    transports) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 				t->init_sent_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 		case SCTP_CMD_REPORT_DUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 					     cmd->obj.u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		case SCTP_CMD_REPORT_BAD_TAG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 			pr_debug("%s: vtag mismatch!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		case SCTP_CMD_STRIKE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 			/* Mark one strike against a transport.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 			sctp_do_8_2_transport_strike(commands, asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 						    cmd->obj.transport, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		case SCTP_CMD_TRANSPORT_IDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 			t = cmd->obj.transport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 			sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		case SCTP_CMD_TRANSPORT_HB_SENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 			t = cmd->obj.transport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 			sctp_do_8_2_transport_strike(commands, asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 						     t, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 			t->hb_sent = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		case SCTP_CMD_TRANSPORT_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 			t = cmd->obj.transport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 			sctp_cmd_transport_on(commands, asoc, t, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		case SCTP_CMD_HB_TIMERS_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 			sctp_cmd_hb_timers_start(commands, asoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 		case SCTP_CMD_HB_TIMER_UPDATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 			t = cmd->obj.transport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 			sctp_transport_reset_hb_timer(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		case SCTP_CMD_HB_TIMERS_STOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 			sctp_cmd_hb_timers_stop(commands, asoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		case SCTP_CMD_REPORT_ERROR:
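			/* Latch an error code to hand back to the caller. */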
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 			error = cmd->obj.error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		case SCTP_CMD_PROCESS_CTSN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 			/* Dummy up a SACK for processing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 			sackh.cum_tsn_ack = cmd->obj.be32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 			sackh.a_rwnd = htonl(asoc->peer.rwnd +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 					     asoc->outqueue.outstanding_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 			sackh.num_gap_ack_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 			sackh.num_dup_tsns = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 			chunk->subh.sack_hdr = &sackh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 					SCTP_CHUNK(chunk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 		case SCTP_CMD_DISCARD_PACKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 			/* We need to discard the whole packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 			 * Uncork the queue since there might be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 			 * responses pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 			chunk->pdiscard = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 			if (asoc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 				sctp_outq_uncork(&asoc->outqueue, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 				local_cork = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		case SCTP_CMD_RTO_PENDING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 			t = cmd->obj.transport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 			t->rto_pending = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		case SCTP_CMD_PART_DELIVER:
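			/* Start partial delivery of queued data to the ULP. */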
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 			asoc->stream.si->start_pd(&asoc->ulpq, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		case SCTP_CMD_RENEGE:
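			/* Renege previously queued data to make room for this chunk. */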
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 			asoc->stream.si->renege_events(&asoc->ulpq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 						       cmd->obj.chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 						       GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		case SCTP_CMD_SETUP_T4:
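			/* Choose the transport and timeout for the T4-RTO (ASCONF) timer. */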
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 			sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		case SCTP_CMD_PROCESS_OPERR:
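			/* Process an inbound Operation Error chunk. */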
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 			sctp_cmd_process_operr(commands, asoc, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		case SCTP_CMD_CLEAR_INIT_TAG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 			asoc->peer.i.init_tag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 		case SCTP_CMD_DEL_NON_PRIMARY:
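			/* Release every peer transport except the primary path. */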
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 			sctp_cmd_del_non_primary(asoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 		case SCTP_CMD_T3_RTX_TIMERS_STOP:
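			/* Stop the T3-RTX retransmission timer on every transport. */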
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 			sctp_cmd_t3_rtx_timers_stop(commands, asoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 		case SCTP_CMD_FORCE_PRIM_RETRAN:
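			/* Flush the outqueue via the primary path, then restore retran_path. */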
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 			t = asoc->peer.retran_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 			asoc->peer.retran_path = asoc->peer.primary_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 			sctp_outq_uncork(&asoc->outqueue, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 			local_cork = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 			asoc->peer.retran_path = t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		case SCTP_CMD_SET_SK_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 			sctp_cmd_set_sk_err(asoc, cmd->obj.error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		case SCTP_CMD_ASSOC_CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 			sctp_cmd_assoc_change(commands, asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 					      cmd->obj.u8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		case SCTP_CMD_ADAPTATION_IND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 			sctp_cmd_adaptation_ind(commands, asoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 		case SCTP_CMD_PEER_NO_AUTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 			sctp_cmd_peer_no_auth(commands, asoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		case SCTP_CMD_ASSOC_SHKEY:
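			/* (Re)compute the association's active authentication key. */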
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 			error = sctp_auth_asoc_init_active_key(asoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 						GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 		case SCTP_CMD_UPDATE_INITTAG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 			asoc->peer.i.init_tag = cmd->obj.u32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 		case SCTP_CMD_SEND_MSG:
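			/* Cork the queue if needed and hand the message to the outqueue. */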
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 			if (!asoc->outqueue.cork) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 				sctp_outq_cork(&asoc->outqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 				local_cork = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 			sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		case SCTP_CMD_PURGE_ASCONF_QUEUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 			sctp_asconf_queue_teardown(asoc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		case SCTP_CMD_SET_ASOC:
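			/* Switch to a new association for the remaining commands. */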
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 			if (asoc && local_cork) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 				sctp_outq_uncork(&asoc->outqueue, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 				local_cork = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 			asoc = cmd->obj.asoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 			pr_warn("Impossible command: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 				cmd->verb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 
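		/* On error, drain the remaining commands, freeing any
		 * queued reply chunks, then stop processing.
		 */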
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 		if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 			cmd = sctp_next_cmd(commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 			while (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 				if (cmd->verb == SCTP_CMD_REPLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 					sctp_chunk_free(cmd->obj.chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 				cmd = sctp_next_cmd(commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	/* If this is in response to a received chunk, wait until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	 * we are done with the packet to open the queue so that we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	 * send multiple packets in response to a single request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		if (chunk->end_of_packet || chunk->singleton)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 			sctp_outq_uncork(&asoc->outqueue, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	} else if (local_cork)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		sctp_outq_uncork(&asoc->outqueue, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	if (sp->data_ready_signalled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 		sp->data_ready_signalled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
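
/*
 * Illustrative sketch only, not part of the original file: the state
 * functions in sctp_sm_statefuns.c build the command sequence that
 * sctp_cmd_interpreter() above walks.  A hypothetical handler that queues
 * a reply chunk and stops the T1-INIT timer might look like the example
 * below; the function name is invented, but sctp_add_cmd_sf(),
 * SCTP_CMD_REPLY, SCTP_CMD_TIMER_STOP, SCTP_CHUNK() and SCTP_TO() are the
 * interfaces used throughout this file.
 */
#if 0	/* example only, never compiled */
static enum sctp_disposition example_reply_and_stop_t1(
					struct sctp_chunk *repl,
					struct sctp_cmd_seq *commands)
{
	/* Queue the reply; SCTP_CMD_REPLY above corks the outqueue if
	 * needed and tails the chunk for transmission.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));

	/* Stop the T1-INIT timer; SCTP_CMD_TIMER_STOP above deletes the
	 * timer and drops the association reference it held.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));

	return SCTP_DISPOSITION_CONSUME;
}
#endif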